From 2512bfcd98001367f2725567d685763c8ee11b25 Mon Sep 17 00:00:00 2001
From: Fan Shang Xiang
Date: Fri, 13 Oct 2023 11:34:11 +0800
Subject: [PATCH 1/5] adopt track2 SDK in e2e test suites
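
Replace the deprecated track1 management-plane packages (services/network/mgmt/2019-06-01,
services/resources/mgmt/2018-05-01, and the go-autorest credential adapter) with the track2
SDK (sdk/azcore, sdk/azidentity, sdk/resourcemanager/*) in the e2e helpers, and update the
vendor tree accordingly.

As a rough sketch of the client pattern the helpers move to (assuming azidentity's
DefaultAzureCredential and the armcompute/v5 disks client; the subscription, resource group,
region, and disk name below are placeholders, not values used by the tests):

    package main

    import (
        "context"
        "log"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
        "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
        "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
    )

    func main() {
        // Placeholder identifiers for illustration only.
        subscriptionID := "<subscription-id>"
        resourceGroup := "<resource-group>"

        // Track2 clients take an azcore.TokenCredential instead of an autorest.Authorizer.
        cred, err := azidentity.NewDefaultAzureCredential(nil)
        if err != nil {
            log.Fatalf("failed to obtain a credential: %v", err)
        }

        client, err := armcompute.NewDisksClient(subscriptionID, cred, nil)
        if err != nil {
            log.Fatalf("failed to create disks client: %v", err)
        }

        // Long-running operations return a poller rather than a track1 Future.
        poller, err := client.BeginCreateOrUpdate(context.Background(), resourceGroup, "test-disk",
            armcompute.Disk{
                Location: to.Ptr("eastus"),
                SKU:      &armcompute.DiskSKU{Name: to.Ptr(armcompute.DiskStorageAccountTypesStandardLRS)},
                Properties: &armcompute.DiskProperties{
                    CreationData: &armcompute.CreationData{CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty)},
                    DiskSizeGB:   to.Ptr(int32(10)),
                },
            }, nil)
        if err != nil {
            log.Fatalf("failed to begin disk creation: %v", err)
        }
        if _, err := poller.PollUntilDone(context.Background(), nil); err != nil {
            log.Fatalf("disk creation failed: %v", err)
        }
    }

With clients accepting azcore.TokenCredential directly, the autorest-based adapter
(azure_identity_credential_adapter.go) removed in this patch is no longer needed.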
---
go.mod | 74 +-
go.sum | 369 +-
...lly_provisioned_azuredisk_detach_tester.go | 8 +-
...ically_provisioned_resize_volume_tester.go | 2 +-
test/utils/azure/azure_helpers.go | 287 +-
.../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 150 +
.../azure-sdk-for-go/sdk/azcore/arm/client.go | 78 +
.../azure-sdk-for-go/sdk/azcore/arm/doc.go | 9 +
.../internal/resource/resource_identifier.go | 224 +
.../arm/internal/resource/resource_type.go | 114 +
.../sdk/azcore/arm/policy/policy.go | 98 +
.../sdk/azcore/arm/resource_identifier.go | 23 +
.../sdk/azcore/arm/resource_type.go | 40 +
.../sdk/azcore/arm/runtime/pipeline.go | 64 +
.../azcore/arm/runtime/policy_bearer_token.go | 145 +
.../azcore/arm/runtime/policy_register_rp.go | 346 +
.../sdk/azcore/arm/runtime/runtime.go | 24 +
.../Azure/azure-sdk-for-go/sdk/azcore/core.go | 93 +-
.../sdk/azcore/internal/exported/exported.go | 117 +-
.../sdk/azcore/internal/exported/request.go | 62 +-
.../internal/exported/response_error.go | 6 +-
.../azcore/internal/pollers/async/async.go | 17 +-
.../sdk/azcore/internal/pollers/body/body.go | 25 +-
.../sdk/azcore/internal/pollers/loc/loc.go | 19 +-
.../sdk/azcore/internal/pollers/op/op.go | 21 +-
.../sdk/azcore/internal/pollers/util.go | 148 +-
.../sdk/azcore/internal/shared/constants.go | 4 +-
.../sdk/azcore/internal/shared/shared.go | 111 +-
.../sdk/azcore/policy/policy.go | 61 +-
.../sdk/azcore/runtime/pager.go | 41 +
.../sdk/azcore/runtime/pipeline.go | 15 +-
.../sdk/azcore/runtime/policy_bearer_token.go | 84 +-
.../azcore/runtime/policy_body_download.go | 3 +-
.../sdk/azcore/runtime/policy_http_header.go | 3 +-
.../azcore/runtime/policy_include_response.go | 5 +-
.../azcore/runtime/policy_key_credential.go | 49 +
.../sdk/azcore/runtime/policy_logging.go | 40 +-
.../sdk/azcore/runtime/policy_request_id.go | 6 +-
.../sdk/azcore/runtime/policy_retry.go | 28 +-
.../azcore/runtime/policy_sas_credential.go | 39 +
.../sdk/azcore/runtime/poller.go | 9 +-
.../sdk/azcore/runtime/request.go | 14 +
.../sdk/azcore/runtime/response.go | 17 +-
.../runtime/transport_default_dialer_other.go | 15 +
.../runtime/transport_default_dialer_wasm.go | 15 +
.../runtime/transport_default_http_client.go | 7 +-
.../sdk/azcore/streaming/progress.go | 3 +
.../azure-sdk-for-go/sdk/azcore/to/doc.go | 9 +
.../azure-sdk-for-go/sdk/azcore/to/to.go | 21 +
.../sdk/azidentity/CHANGELOG.md | 132 +
.../azure-sdk-for-go/sdk/azidentity/README.md | 8 +-
.../sdk/azidentity/TROUBLESHOOTING.md | 22 +-
.../sdk/azidentity/assets.json | 6 +
.../sdk/azidentity/azidentity.go | 119 +-
.../sdk/azidentity/azure_cli_credential.go | 156 +-
.../azidentity/chained_token_credential.go | 19 +-
.../azure-sdk-for-go/sdk/azidentity/ci.yml | 25 +-
.../azidentity/client_assertion_credential.go | 37 +-
.../client_certificate_credential.go | 39 +-
.../azidentity/client_secret_credential.go | 38 +-
.../sdk/azidentity/confidential_client.go | 156 +
.../azidentity/default_azure_credential.go | 118 +-
.../sdk/azidentity/device_code_credential.go | 59 +-
.../sdk/azidentity/environment_credential.go | 42 +-
.../azure-sdk-for-go/sdk/azidentity/errors.go | 52 +-
.../interactive_browser_credential.go | 63 +-
.../sdk/azidentity/logging.go | 18 +-
.../sdk/azidentity/managed_identity_client.go | 61 +-
.../azidentity/managed_identity_credential.go | 26 +-
.../sdk/azidentity/on_behalf_of_credential.go | 92 +
.../sdk/azidentity/public_client.go | 178 +
.../sdk/azidentity/test-resources-pre.ps1 | 36 +
.../sdk/azidentity/test-resources.bicep | 1 +
.../username_password_credential.go | 44 +-
.../sdk/azidentity/version.go | 2 +-
.../sdk/azidentity/workload_identity.go | 126 +
.../sdk/internal/exported/exported.go | 124 +
.../sdk/internal/poller/util.go | 155 +
.../compute/armcompute/v5/CHANGELOG.md | 310 +
.../compute/armcompute/v5/LICENSE.txt} | 12 +-
.../compute/armcompute/v5/README.md | 139 +
.../compute/armcompute/v5/assets.json | 6 +
.../compute/armcompute/v5/autorest.md | 13 +
.../armcompute/v5/availabilitysets_client.go | 477 +
.../compute/armcompute/v5/build.go | 7 +
.../v5/capacityreservationgroups_client.go | 428 +
.../v5/capacityreservations_client.go | 420 +
.../compute/armcompute/v5/ci.yml | 28 +
.../compute/armcompute/v5/client_factory.go | 284 +
.../v5/cloudserviceoperatingsystems_client.go | 303 +
.../v5/cloudserviceroleinstances_client.go | 609 +
.../armcompute/v5/cloudserviceroles_client.go | 181 +
.../armcompute/v5/cloudservices_client.go | 943 +
.../v5/cloudservicesupdatedomain_client.go | 256 +
.../v5/communitygalleries_client.go | 105 +
.../v5/communitygalleryimages_client.go | 181 +
.../communitygalleryimageversions_client.go | 193 +
.../compute/armcompute/v5/constants.go | 2717 ++
.../v5/dedicatedhostgroups_client.go | 417 +
.../armcompute/v5/dedicatedhosts_client.go | 560 +
.../armcompute/v5/diskaccesses_client.go | 809 +
.../v5/diskencryptionsets_client.go | 522 +
.../armcompute/v5/diskrestorepoint_client.go | 354 +
.../compute/armcompute/v5/disks_client.go | 591 +
.../compute/armcompute/v5/galleries_client.go | 447 +
.../v5/galleryapplications_client.go | 411 +
.../v5/galleryapplicationversions_client.go | 441 +
.../armcompute/v5/galleryimages_client.go | 410 +
.../v5/galleryimageversions_client.go | 440 +
.../v5/gallerysharingprofile_client.go | 117 +
.../compute/armcompute/v5/images_client.go | 444 +
.../armcompute/v5/loganalytics_client.go | 186 +
.../compute/armcompute/v5/models.go | 8341 ++++
.../compute/armcompute/v5/models_serde.go | 19156 ++++++++
.../armcompute/v5/operations_client.go | 87 +
.../compute/armcompute/v5/options.go | 1901 +
.../v5/proximityplacementgroups_client.go | 415 +
.../armcompute/v5/resourceskus_client.go | 110 +
.../compute/armcompute/v5/response_types.go | 1646 +
.../v5/restorepointcollections_client.go | 437 +
.../armcompute/v5/restorepoints_client.go | 263 +
.../armcompute/v5/sharedgalleries_client.go | 173 +
.../v5/sharedgalleryimages_client.go | 183 +
.../v5/sharedgalleryimageversions_client.go | 196 +
.../compute/armcompute/v5/snapshots_client.go | 593 +
.../armcompute/v5/sshpublickeys_client.go | 473 +
.../compute/armcompute/v5/time_rfc3339.go | 86 +
.../compute/armcompute/v5/usage_client.go | 110 +
.../virtualmachineextensionimages_client.go | 247 +
.../v5/virtualmachineextensions_client.go | 405 +
.../v5/virtualmachineimages_client.go | 446 +
.../v5/virtualmachineimagesedgezone_client.go | 410 +
.../v5/virtualmachineruncommands_client.go | 540 +
.../armcompute/v5/virtualmachines_client.go | 1760 +
...virtualmachinescalesetextensions_client.go | 411 +
...almachinescalesetrollingupgrades_client.go | 318 +
.../v5/virtualmachinescalesets_client.go | 1751 +
...rtualmachinescalesetvmextensions_client.go | 430 +
...tualmachinescalesetvmruncommands_client.go | 439 +
.../v5/virtualmachinescalesetvms_client.go | 1223 +
.../v5/virtualmachinesizes_client.go | 104 +
.../armcontainerservice/v4/CHANGELOG.md | 145 +
.../armcontainerservice/v4/LICENSE.txt | 21 +
.../armcontainerservice/v4/README.md | 125 +
.../v4/agentpools_client.go | 613 +
.../armcontainerservice/v4/assets.json | 6 +
.../armcontainerservice/v4/autorest.md | 13 +
.../armcontainerservice/v4/build.go | 7 +
.../armcontainerservice/v4/ci.yml | 28 +
.../armcontainerservice/v4/client_factory.go | 78 +
.../armcontainerservice/v4/constants.go | 847 +
.../armcontainerservice/v4/date_type.go | 59 +
.../v4/maintenanceconfigurations_client.go | 306 +
.../v4/managedclusters_client.go | 1613 +
.../armcontainerservice/v4/models.go | 2062 +
.../armcontainerservice/v4/models_serde.go | 4846 ++
.../v4/operations_client.go | 87 +
.../armcontainerservice/v4/options.go | 305 +
.../v4/privateendpointconnections_client.go | 314 +
.../v4/privatelinkresources_client.go | 105 +
.../v4/resolveprivatelinkserviceid_client.go | 109 +
.../armcontainerservice/v4/response_types.go | 289 +
.../v4/snapshots_client.go | 407 +
.../armcontainerservice/v4/time_rfc3339.go | 86 +
.../network/armnetwork/v4/CHANGELOG.md | 721 +
.../network/armnetwork/v4/LICENSE.txt | 21 +
.../network/armnetwork/v4/README.md | 119 +
.../v4/adminrulecollections_client.go | 356 +
.../armnetwork/v4/adminrules_client.go | 373 +
...atewayprivateendpointconnections_client.go | 335 +
...ationgatewayprivatelinkresources_client.go | 115 +
.../v4/applicationgateways_client.go | 1102 +
...cationgatewaywafdynamicmanifests_client.go | 110 +
...atewaywafdynamicmanifestsdefault_client.go | 101 +
.../v4/applicationsecuritygroups_client.go | 440 +
.../network/armnetwork/v4/assets.json | 6 +
.../network/armnetwork/v4/autorest.md | 13 +
.../v4/availabledelegations_client.go | 110 +
.../v4/availableendpointservices_client.go | 110 +
.../availableprivateendpointtypes_client.go | 182 +
...vailableresourcegroupdelegations_client.go | 115 +
.../v4/availableservicealiases_client.go | 180 +
.../v4/azurefirewallfqdntags_client.go | 105 +
.../armnetwork/v4/azurefirewalls_client.go | 591 +
.../armnetwork/v4/bastionhosts_client.go | 447 +
.../v4/bgpservicecommunities_client.go | 105 +
.../network/armnetwork/v4/build.go | 7 +
.../network/armnetwork/v4/ci.yml | 28 +
.../network/armnetwork/v4/client_factory.go | 704 +
.../v4/configurationpolicygroups_client.go | 335 +
.../v4/connectionmonitors_client.go | 626 +
.../v4/connectivityconfigurations_client.go | 339 +
.../network/armnetwork/v4/constants.go | 4198 ++
.../armnetwork/v4/customipprefixes_client.go | 442 +
.../v4/ddoscustompolicies_client.go | 314 +
.../v4/ddosprotectionplans_client.go | 439 +
.../v4/defaultsecurityrules_client.go | 181 +
.../armnetwork/v4/dscpconfiguration_client.go | 374 +
...xpressroutecircuitauthorizations_client.go | 335 +
.../expressroutecircuitconnections_client.go | 356 +
.../v4/expressroutecircuitpeerings_client.go | 335 +
.../v4/expressroutecircuits_client.go | 810 +
.../v4/expressrouteconnections_client.go | 326 +
...ressroutecrossconnectionpeerings_client.go | 335 +
.../v4/expressroutecrossconnections_client.go | 616 +
.../v4/expressroutegateways_client.go | 433 +
.../armnetwork/v4/expressroutelinks_client.go | 180 +
.../expressrouteportauthorizations_client.go | 335 +
.../armnetwork/v4/expressrouteports_client.go | 504 +
.../v4/expressrouteportslocations_client.go | 163 +
...xpressrouteproviderportslocation_client.go | 99 +
.../v4/expressrouteserviceproviders_client.go | 105 +
.../armnetwork/v4/firewallpolicies_client.go | 442 +
.../v4/firewallpolicyidpssignatures_client.go | 109 +
...policyidpssignaturesfiltervalues_client.go | 109 +
...allpolicyidpssignaturesoverrides_client.go | 297 +
...rewallpolicyrulecollectiongroups_client.go | 335 +
.../network/armnetwork/v4/flowlogs_client.go | 401 +
.../network/armnetwork/v4/groups_client.go | 338 +
.../armnetwork/v4/hubroutetables_client.go | 333 +
.../v4/hubvirtualnetworkconnections_client.go | 335 +
.../armnetwork/v4/inboundnatrules_client.go | 337 +
.../v4/inboundsecurityrule_client.go | 124 +
.../v4/interfaceipconfigurations_client.go | 181 +
.../v4/interfaceloadbalancers_client.go | 115 +
.../network/armnetwork/v4/interfaces.go | 54 +
.../armnetwork/v4/interfaces_client.go | 1182 +
.../v4/interfacetapconfigurations_client.go | 335 +
.../armnetwork/v4/ipallocations_client.go | 441 +
.../network/armnetwork/v4/ipgroups_client.go | 439 +
.../loadbalancerbackendaddresspools_client.go | 335 +
...balancerfrontendipconfigurations_client.go | 181 +
.../loadbalancerloadbalancingrules_client.go | 181 +
.../loadbalancernetworkinterfaces_client.go | 115 +
.../v4/loadbalanceroutboundrules_client.go | 181 +
.../v4/loadbalancerprobes_client.go | 180 +
.../armnetwork/v4/loadbalancers_client.go | 656 +
.../v4/localnetworkgateways_client.go | 380 +
.../armnetwork/v4/management_client.go | 994 +
...ntgroupnetworkmanagerconnections_client.go | 274 +
.../armnetwork/v4/managercommits_client.go | 119 +
.../v4/managerdeploymentstatus_client.go | 114 +
.../network/armnetwork/v4/managers_client.go | 442 +
.../network/armnetwork/v4/models.go | 13941 ++++++
.../network/armnetwork/v4/models_serde.go | 34501 ++++++++++++++
.../armnetwork/v4/natgateways_client.go | 438 +
.../network/armnetwork/v4/natrules_client.go | 333 +
.../armnetwork/v4/operations_client.go | 93 +
.../network/armnetwork/v4/options.go | 3927 ++
.../armnetwork/v4/p2svpngateways_client.go | 815 +
.../armnetwork/v4/packetcaptures_client.go | 477 +
...erexpressroutecircuitconnections_client.go | 191 +
.../armnetwork/v4/polymorphic_helpers.go | 223 +
.../v4/privatednszonegroups_client.go | 335 +
.../armnetwork/v4/privateendpoints_client.go | 377 +
.../v4/privatelinkservices_client.go | 944 +
.../network/armnetwork/v4/profiles_client.go | 428 +
.../armnetwork/v4/publicipaddresses_client.go | 993 +
.../armnetwork/v4/publicipprefixes_client.go | 442 +
.../v4/resourcenavigationlinks_client.go | 111 +
.../network/armnetwork/v4/response_types.go | 3734 ++
.../armnetwork/v4/routefilterrules_client.go | 334 +
.../armnetwork/v4/routefilters_client.go | 440 +
.../network/armnetwork/v4/routemaps_client.go | 332 +
.../network/armnetwork/v4/routes_client.go | 332 +
.../armnetwork/v4/routetables_client.go | 438 +
.../armnetwork/v4/routingintent_client.go | 333 +
.../armnetwork/v4/scopeconnections_client.go | 312 +
.../v4/securityadminconfigurations_client.go | 336 +
.../armnetwork/v4/securitygroups_client.go | 441 +
.../v4/securitypartnerproviders_client.go | 440 +
.../armnetwork/v4/securityrules_client.go | 333 +
.../v4/serviceassociationlinks_client.go | 111 +
.../v4/serviceendpointpolicies_client.go | 443 +
...serviceendpointpolicydefinitions_client.go | 335 +
.../v4/servicetaginformation_client.go | 119 +
.../armnetwork/v4/servicetags_client.go | 102 +
.../armnetwork/v4/staticmembers_client.go | 331 +
.../network/armnetwork/v4/subnets_client.go | 493 +
...riptionnetworkmanagerconnections_client.go | 274 +
.../network/armnetwork/v4/time_rfc3339.go | 86 +
.../network/armnetwork/v4/usages_client.go | 109 +
.../network/armnetwork/v4/vipswap_client.go | 240 +
.../v4/virtualapplianceconnections_client.go | 336 +
.../armnetwork/v4/virtualappliances_client.go | 442 +
.../v4/virtualappliancesites_client.go | 335 +
.../v4/virtualapplianceskus_client.go | 161 +
.../v4/virtualhubbgpconnection_client.go | 265 +
.../v4/virtualhubbgpconnections_client.go | 265 +
.../v4/virtualhubipconfiguration_client.go | 335 +
.../v4/virtualhubroutetablev2s_client.go | 335 +
.../armnetwork/v4/virtualhubs_client.go | 662 +
...virtualnetworkgatewayconnections_client.go | 900 +
.../virtualnetworkgatewaynatrules_client.go | 337 +
.../v4/virtualnetworkgateways_client.go | 1622 +
.../v4/virtualnetworkpeerings_client.go | 338 +
.../armnetwork/v4/virtualnetworks_client.go | 681 +
.../v4/virtualnetworktaps_client.go | 439 +
.../v4/virtualrouterpeerings_client.go | 335 +
.../armnetwork/v4/virtualrouters_client.go | 376 +
.../armnetwork/v4/virtualwans_client.go | 436 +
.../armnetwork/v4/vpnconnections_client.go | 497 +
.../armnetwork/v4/vpngateways_client.go | 670 +
.../v4/vpnlinkconnections_client.go | 280 +
.../v4/vpnserverconfigurations_client.go | 440 +
...urationsassociatedwithvirtualwan_client.go | 115 +
.../v4/vpnsitelinkconnections_client.go | 116 +
.../armnetwork/v4/vpnsitelinks_client.go | 180 +
.../network/armnetwork/v4/vpnsites_client.go | 436 +
.../v4/vpnsitesconfiguration_client.go | 119 +
.../network/armnetwork/v4/watchers_client.go | 1305 +
.../webapplicationfirewallpolicies_client.go | 366 +
.../armnetwork/v4/webcategories_client.go | 163 +
.../privatedns/armprivatedns/CHANGELOG.md | 14 +
.../privatedns/armprivatedns/LICENSE.txt | 21 +
.../privatedns/armprivatedns/README.md | 91 +
.../privatedns/armprivatedns/assets.json | 6 +
.../privatedns/armprivatedns/autorest.md | 12 +
.../privatedns/armprivatedns/build.go | 7 +
.../privatedns/armprivatedns/ci.yml | 28 +
.../armprivatedns/client_factory.go | 55 +
.../privatedns/armprivatedns/constants.go | 84 +
.../privatedns/armprivatedns/models.go | 462 +
.../privatedns/armprivatedns/models_serde.go | 836 +
.../armprivatedns/privatezones_client.go | 450 +
.../armprivatedns/recordsets_client.go | 467 +
.../armprivatedns/response_types.go | 95 +
.../virtualnetworklinks_client.go | 410 +
.../resources/armresources/CHANGELOG.md | 21 +
.../resources/armresources/LICENSE.txt | 21 +
.../resources/armresources/README.md | 92 +
.../resources/armresources/assets.json | 6 +
.../resources/armresources/autorest.md | 13 +
.../resources/armresources/build.go | 7 +
.../resources/armresources/ci.yml | 28 +
.../resources/armresources/client.go | 921 +
.../resources/armresources/client_factory.go | 79 +
.../resources/armresources/constants.go | 403 +
.../deploymentoperations_client.go | 676 +
.../armresources/deployments_client.go | 2634 ++
.../resources/armresources/models.go | 1808 +
.../resources/armresources/models_serde.go | 3109 ++
.../armresources/operations_client.go | 94 +
.../providerresourcetypes_client.go | 101 +
.../armresources/providers_client.go | 478 +
.../armresources/resourcegroups_client.go | 444 +
.../resources/armresources/response_types.go | 493 +
.../resources/armresources/tags_client.go | 491 +
.../resources/armresources/time_rfc3339.go | 87 +
.../mgmt/2019-06-01/network/CHANGELOG.md | 2 -
.../mgmt/2019-06-01/network/_meta.json | 11 -
.../2019-06-01/network/applicationgateways.go | 1476 -
.../network/applicationsecuritygroups.go | 580 -
.../network/availabledelegations.go | 148 -
.../network/availableendpointservices.go | 148 -
.../network/availableprivateendpointtypes.go | 267 -
.../availableresourcegroupdelegations.go | 151 -
.../network/azurefirewallfqdntags.go | 145 -
.../mgmt/2019-06-01/network/azurefirewalls.go | 577 -
.../mgmt/2019-06-01/network/bastionhosts.go | 579 -
.../network/bgpservicecommunities.go | 145 -
.../network/mgmt/2019-06-01/network/client.go | 200 -
.../2019-06-01/network/connectionmonitors.go | 683 -
.../2019-06-01/network/ddoscustompolicies.go | 351 -
.../2019-06-01/network/ddosprotectionplans.go | 583 -
.../network/defaultsecurityrules.go | 228 -
.../network/mgmt/2019-06-01/network/enums.go | 2076 -
.../expressroutecircuitauthorizations.go | 396 -
.../network/expressroutecircuitconnections.go | 403 -
.../network/expressroutecircuitpeerings.go | 406 -
.../network/expressroutecircuits.go | 985 -
.../network/expressrouteconnections.go | 359 -
.../expressroutecrossconnectionpeerings.go | 407 -
.../network/expressroutecrossconnections.go | 754 -
.../network/expressroutegateways.go | 423 -
.../2019-06-01/network/expressroutelinks.go | 228 -
.../2019-06-01/network/expressrouteports.go | 580 -
.../network/expressrouteportslocations.go | 221 -
.../network/expressrouteserviceproviders.go | 145 -
.../2019-06-01/network/firewallpolicies.go | 581 -
.../network/firewallpolicyrulegroups.go | 406 -
.../network/hubvirtualnetworkconnections.go | 228 -
.../2019-06-01/network/inboundnatrules.go | 416 -
.../network/interfaceipconfigurations.go | 228 -
.../network/interfaceloadbalancers.go | 150 -
.../2019-06-01/network/interfacesgroup.go | 1277 -
.../network/interfacetapconfigurations.go | 429 -
.../loadbalancerbackendaddresspools.go | 228 -
.../loadbalancerfrontendipconfigurations.go | 229 -
.../network/loadbalancerloadbalancingrules.go | 228 -
.../network/loadbalancernetworkinterfaces.go | 150 -
.../network/loadbalanceroutboundrules.go | 228 -
.../2019-06-01/network/loadbalancerprobes.go | 228 -
.../mgmt/2019-06-01/network/loadbalancers.go | 582 -
.../network/localnetworkgateways.go | 493 -
.../network/mgmt/2019-06-01/network/models.go | 37163 ----------------
.../mgmt/2019-06-01/network/natgateways.go | 579 -
.../mgmt/2019-06-01/network/operations.go | 140 -
.../mgmt/2019-06-01/network/p2svpngateways.go | 741 -
.../network/p2svpnserverconfigurations.go | 394 -
.../mgmt/2019-06-01/network/packetcaptures.go | 520 -
.../peerexpressroutecircuitconnections.go | 233 -
.../2019-06-01/network/privateendpoints.go | 501 -
.../2019-06-01/network/privatelinkservices.go | 1063 -
.../mgmt/2019-06-01/network/profiles.go | 576 -
.../2019-06-01/network/publicipaddresses.go | 927 -
.../2019-06-01/network/publicipprefixes.go | 583 -
.../network/resourcenavigationlinks.go | 110 -
.../2019-06-01/network/routefilterrules.go | 489 -
.../mgmt/2019-06-01/network/routefilters.go | 586 -
.../network/mgmt/2019-06-01/network/routes.go | 391 -
.../mgmt/2019-06-01/network/routetables.go | 582 -
.../mgmt/2019-06-01/network/securitygroups.go | 582 -
.../mgmt/2019-06-01/network/securityrules.go | 391 -
.../network/serviceassociationlinks.go | 110 -
.../network/serviceendpointpolicies.go | 583 -
.../serviceendpointpolicydefinitions.go | 393 -
.../mgmt/2019-06-01/network/servicetags.go | 107 -
.../mgmt/2019-06-01/network/subnets.go | 563 -
.../network/mgmt/2019-06-01/network/usages.go | 154 -
.../mgmt/2019-06-01/network/version.go | 19 -
.../mgmt/2019-06-01/network/virtualhubs.go | 579 -
.../virtualnetworkgatewayconnections.go | 743 -
.../network/virtualnetworkgateways.go | 1653 -
.../network/virtualnetworkpeerings.go | 393 -
.../2019-06-01/network/virtualnetworks.go | 778 -
.../2019-06-01/network/virtualnetworktaps.go | 611 -
.../mgmt/2019-06-01/network/virtualwans.go | 579 -
.../mgmt/2019-06-01/network/vpnconnections.go | 393 -
.../mgmt/2019-06-01/network/vpngateways.go | 658 -
.../2019-06-01/network/vpnlinkconnections.go | 152 -
.../network/vpnsitelinkconnections.go | 112 -
.../mgmt/2019-06-01/network/vpnsitelinks.go | 227 -
.../mgmt/2019-06-01/network/vpnsites.go | 579 -
.../network/vpnsitesconfiguration.go | 120 -
.../mgmt/2019-06-01/network/watchers.go | 1560 -
.../network/webapplicationfirewallpolicies.go | 513 -
.../mgmt/2018-05-01/resources/CHANGELOG.md | 2 -
.../mgmt/2018-05-01/resources/_meta.json | 11 -
.../mgmt/2018-05-01/resources/client.go | 43 -
.../resources/deploymentoperations.go | 469 -
.../mgmt/2018-05-01/resources/deployments.go | 1618 -
.../mgmt/2018-05-01/resources/enums.go | 56 -
.../mgmt/2018-05-01/resources/groups.go | 666 -
.../mgmt/2018-05-01/resources/models.go | 2552 --
.../mgmt/2018-05-01/resources/operations.go | 140 -
.../mgmt/2018-05-01/resources/providers.go | 382 -
.../mgmt/2018-05-01/resources/resources.go | 1360 -
.../mgmt/2018-05-01/resources/tags.go | 443 -
.../mgmt/2018-05-01/resources/version.go | 19 -
.../apps/cache/cache.go | 31 +-
.../apps/confidential/confidential.go | 471 +-
.../apps/errors/error_design.md | 2 +-
.../apps/internal/base/base.go | 333 +-
.../internal/base/internal/storage/items.go | 9 +-
.../internal/storage/partitioned_storage.go | 66 +-
.../internal/base/internal/storage/storage.go | 67 +-
.../storage/test_serialized_cache.json | 56 -
.../apps/internal/oauth/oauth.go | 73 +-
.../oauth/ops/accesstokens/accesstokens.go | 41 +-
.../internal/oauth/ops/accesstokens/tokens.go | 26 +-
.../internal/oauth/ops/authority/authority.go | 197 +-
.../apps/internal/oauth/resolvers.go | 3 -
.../apps/internal/options/options.go | 52 +
.../apps/internal/shared/shared.go | 3 +-
.../apps/internal/version/version.go | 2 +-
.../apps/public/public.go | 499 +-
.../antlr4/runtime/Go/antlr/{ => v4}/LICENSE | 0
.../antlr4/runtime/Go/antlr/v4/antlrdoc.go | 68 +
.../antlr4/runtime/Go/antlr/{ => v4}/atn.go | 27 +-
.../runtime/Go/antlr/{ => v4}/atn_config.go | 54 +-
.../Go/antlr/{ => v4}/atn_config_set.go | 80 +-
.../{ => v4}/atn_deserialization_options.go | 2 +-
.../Go/antlr/{ => v4}/atn_deserializer.go | 2 +-
.../Go/antlr/{ => v4}/atn_simulator.go | 2 +-
.../runtime/Go/antlr/{ => v4}/atn_state.go | 9 +-
.../runtime/Go/antlr/{ => v4}/atn_type.go | 2 +-
.../runtime/Go/antlr/{ => v4}/char_stream.go | 2 +-
.../Go/antlr/{ => v4}/common_token_factory.go | 2 +-
.../Go/antlr/{ => v4}/common_token_stream.go | 6 +-
.../antlr4/runtime/Go/antlr/v4/comparators.go | 147 +
.../antlr4/runtime/Go/antlr/{ => v4}/dfa.go | 54 +-
.../Go/antlr/{ => v4}/dfa_serializer.go | 2 +-
.../runtime/Go/antlr/{ => v4}/dfa_state.go | 54 +-
.../{ => v4}/diagnostic_error_listener.go | 4 +-
.../Go/antlr/{ => v4}/error_listener.go | 6 +-
.../Go/antlr/{ => v4}/error_strategy.go | 38 +-
.../runtime/Go/antlr/{ => v4}/errors.go | 5 +-
.../runtime/Go/antlr/{ => v4}/file_stream.go | 2 +-
.../runtime/Go/antlr/{ => v4}/input_stream.go | 2 +-
.../runtime/Go/antlr/{ => v4}/int_stream.go | 2 +-
.../runtime/Go/antlr/{ => v4}/interval_set.go | 6 +-
.../antlr4/runtime/Go/antlr/v4/jcollect.go | 198 +
.../antlr4/runtime/Go/antlr/{ => v4}/lexer.go | 6 +-
.../runtime/Go/antlr/{ => v4}/lexer_action.go | 44 +-
.../antlr/{ => v4}/lexer_action_executor.go | 27 +-
.../Go/antlr/{ => v4}/lexer_atn_simulator.go | 17 +-
.../runtime/Go/antlr/{ => v4}/ll1_analyzer.go | 30 +-
.../runtime/Go/antlr/{ => v4}/parser.go | 14 +-
.../Go/antlr/{ => v4}/parser_atn_simulator.go | 153 +-
.../Go/antlr/{ => v4}/parser_rule_context.go | 4 +-
.../Go/antlr/{ => v4}/prediction_context.go | 161 +-
.../Go/antlr/{ => v4}/prediction_mode.go | 48 +-
.../runtime/Go/antlr/{ => v4}/recognizer.go | 19 +-
.../runtime/Go/antlr/{ => v4}/rule_context.go | 2 +-
.../Go/antlr/{ => v4}/semantic_context.go | 81 +-
.../antlr4/runtime/Go/antlr/{ => v4}/token.go | 3 +-
.../runtime/Go/antlr/{ => v4}/token_source.go | 2 +-
.../runtime/Go/antlr/{ => v4}/token_stream.go | 2 +-
.../Go/antlr/{ => v4}/tokenstream_rewriter.go | 420 +-
.../Go/antlr/{ => v4}/trace_listener.go | 2 +-
.../runtime/Go/antlr/{ => v4}/transition.go | 2 +-
.../antlr4/runtime/Go/antlr/{ => v4}/tree.go | 5 +-
.../antlr4/runtime/Go/antlr/{ => v4}/trees.go | 9 +-
.../antlr4/runtime/Go/antlr/{ => v4}/utils.go | 21 +-
.../runtime/Go/antlr/{ => v4}/utils_set.go | 34 +-
.../spec/lib/go/csi/csi.pb.go | 1562 +-
.../emicklei/go-restful/v3/CHANGES.md | 20 +-
.../emicklei/go-restful/v3/README.md | 4 +
.../emicklei/go-restful/v3/constants.go | 2 +
.../emicklei/go-restful/v3/request.go | 5 +-
.../emicklei/go-restful/v3/response.go | 3 +
.../emicklei/go-restful/v3/route.go | 4 +-
.../emicklei/go-restful/v3/route_builder.go | 34 +-
.../jsonreference/internal/normalize_url.go | 5 +
vendor/github.com/go-openapi/swag/util.go | 16 +-
.../github.com/golang-jwt/jwt/v5/.gitignore | 4 +
.../go/azidext => golang-jwt/jwt/v5}/LICENSE | 6 +-
.../golang-jwt/jwt/v5/MIGRATION_GUIDE.md | 185 +
vendor/github.com/golang-jwt/jwt/v5/README.md | 167 +
.../github.com/golang-jwt/jwt/v5/SECURITY.md | 19 +
.../golang-jwt/jwt/v5/VERSION_HISTORY.md | 137 +
vendor/github.com/golang-jwt/jwt/v5/claims.go | 16 +
vendor/github.com/golang-jwt/jwt/v5/doc.go | 4 +
vendor/github.com/golang-jwt/jwt/v5/ecdsa.go | 134 +
.../golang-jwt/jwt/v5/ecdsa_utils.go | 69 +
.../github.com/golang-jwt/jwt/v5/ed25519.go | 80 +
.../golang-jwt/jwt/v5/ed25519_utils.go | 64 +
vendor/github.com/golang-jwt/jwt/v5/errors.go | 49 +
.../golang-jwt/jwt/v5/errors_go1_20.go | 47 +
.../golang-jwt/jwt/v5/errors_go_other.go | 78 +
vendor/github.com/golang-jwt/jwt/v5/hmac.go | 104 +
.../golang-jwt/jwt/v5/map_claims.go | 109 +
vendor/github.com/golang-jwt/jwt/v5/none.go | 50 +
vendor/github.com/golang-jwt/jwt/v5/parser.go | 215 +
.../golang-jwt/jwt/v5/parser_option.go | 120 +
.../golang-jwt/jwt/v5/registered_claims.go | 63 +
vendor/github.com/golang-jwt/jwt/v5/rsa.go | 93 +
.../github.com/golang-jwt/jwt/v5/rsa_pss.go | 135 +
.../github.com/golang-jwt/jwt/v5/rsa_utils.go | 107 +
.../golang-jwt/jwt/v5/signing_method.go | 49 +
.../golang-jwt/jwt/v5/staticcheck.conf | 1 +
vendor/github.com/golang-jwt/jwt/v5/token.go | 86 +
.../golang-jwt/jwt/v5/token_option.go | 5 +
vendor/github.com/golang-jwt/jwt/v5/types.go | 150 +
.../github.com/golang-jwt/jwt/v5/validator.go | 301 +
.../github.com/google/cel-go/cel/BUILD.bazel | 5 +-
vendor/github.com/google/cel-go/cel/decls.go | 79 +-
vendor/github.com/google/cel-go/cel/env.go | 126 +-
vendor/github.com/google/cel-go/cel/io.go | 4 +-
.../github.com/google/cel-go/cel/library.go | 258 +
vendor/github.com/google/cel-go/cel/macro.go | 14 +-
.../github.com/google/cel-go/cel/options.go | 64 +-
.../github.com/google/cel-go/cel/program.go | 114 +-
.../google/cel-go/checker/BUILD.bazel | 4 +-
.../google/cel-go/checker/checker.go | 139 +-
.../github.com/google/cel-go/checker/cost.go | 63 +-
.../google/cel-go/checker/decls/BUILD.bazel | 2 +-
.../google/cel-go/checker/decls/decls.go | 8 +-
.../github.com/google/cel-go/checker/env.go | 27 +-
.../google/cel-go/checker/printer.go | 2 +-
.../google/cel-go/checker/standard.go | 2 +
.../github.com/google/cel-go/checker/types.go | 47 +-
.../google/cel-go/common/BUILD.bazel | 2 +-
.../cel-go/common/containers/BUILD.bazel | 4 +-
.../google/cel-go/common/debug/BUILD.bazel | 2 +-
.../google/cel-go/common/debug/debug.go | 14 +-
.../github.com/google/cel-go/common/errors.go | 2 +-
.../cel-go/common/operators/operators.go | 4 +
.../cel-go/common/overloads/overloads.go | 10 +
.../google/cel-go/common/types/BUILD.bazel | 10 +-
.../google/cel-go/common/types/bool.go | 9 +-
.../google/cel-go/common/types/bytes.go | 9 +-
.../google/cel-go/common/types/double.go | 13 +-
.../google/cel-go/common/types/duration.go | 17 +-
.../google/cel-go/common/types/err.go | 25 +-
.../google/cel-go/common/types/int.go | 23 +-
.../google/cel-go/common/types/iterator.go | 4 +-
.../google/cel-go/common/types/json_value.go | 1 +
.../google/cel-go/common/types/list.go | 88 +-
.../google/cel-go/common/types/map.go | 59 +-
.../google/cel-go/common/types/null.go | 26 +-
.../google/cel-go/common/types/object.go | 14 +-
.../google/cel-go/common/types/optional.go | 108 +
.../google/cel-go/common/types/pb/BUILD.bazel | 2 +-
.../google/cel-go/common/types/pb/enum.go | 4 +-
.../google/cel-go/common/types/pb/file.go | 83 +-
.../google/cel-go/common/types/pb/pb.go | 53 +-
.../google/cel-go/common/types/pb/type.go | 66 +-
.../google/cel-go/common/types/provider.go | 30 +-
.../cel-go/common/types/ref/BUILD.bazel | 2 +-
.../cel-go/common/types/ref/provider.go | 8 +-
.../cel-go/common/types/ref/reference.go | 15 +-
.../google/cel-go/common/types/string.go | 9 +-
.../google/cel-go/common/types/timestamp.go | 19 +-
.../cel-go/common/types/traits/BUILD.bazel | 1 +
.../types/traits/zeroer.go} | 26 +-
.../google/cel-go/common/types/type.go | 4 +-
.../google/cel-go/common/types/uint.go | 21 +-
.../google/cel-go/common/types/unknown.go | 4 +-
.../github.com/google/cel-go/ext/BUILD.bazel | 32 +
vendor/github.com/google/cel-go/ext/README.md | 205 +
.../github.com/google/cel-go/ext/bindings.go | 100 +
.../github.com/google/cel-go/ext/encoders.go | 18 +-
vendor/github.com/google/cel-go/ext/guards.go | 13 +
vendor/github.com/google/cel-go/ext/math.go | 388 +
vendor/github.com/google/cel-go/ext/native.go | 574 +
vendor/github.com/google/cel-go/ext/protos.go | 145 +
vendor/github.com/google/cel-go/ext/sets.go | 138 +
.../github.com/google/cel-go/ext/strings.go | 776 +-
.../google/cel-go/interpreter/BUILD.bazel | 7 +-
.../google/cel-go/interpreter/activation.go | 30 +-
.../cel-go/interpreter/attribute_patterns.go | 77 +-
.../google/cel-go/interpreter/attributes.go | 1064 +-
.../google/cel-go/interpreter/decorators.go | 11 +-
.../google/cel-go/interpreter/formatting.go | 383 +
.../cel-go/interpreter/functions/functions.go | 2 +-
.../cel-go/interpreter/interpretable.go | 469 +-
.../google/cel-go/interpreter/interpreter.go | 12 +-
.../google/cel-go/interpreter/planner.go | 178 +-
.../google/cel-go/interpreter/prune.go | 373 +-
.../google/cel-go/interpreter/runtimecost.go | 58 +-
.../google/cel-go/parser/BUILD.bazel | 8 +-
.../google/cel-go/parser/gen/BUILD.bazel | 2 +-
.../google/cel-go/parser/gen/CEL.g4 | 38 +-
.../google/cel-go/parser/gen/CEL.interp | 5 +-
.../cel-go/parser/gen/cel_base_listener.go | 48 +-
.../cel-go/parser/gen/cel_base_visitor.go | 28 +-
.../google/cel-go/parser/gen/cel_lexer.go | 4 +-
.../google/cel-go/parser/gen/cel_listener.go | 48 +-
.../google/cel-go/parser/gen/cel_parser.go | 2108 +-
.../google/cel-go/parser/gen/cel_visitor.go | 26 +-
.../google/cel-go/parser/gen/generate.sh | 2 +-
.../github.com/google/cel-go/parser/helper.go | 179 +-
.../github.com/google/cel-go/parser/input.go | 3 +-
.../github.com/google/cel-go/parser/macro.go | 20 +-
.../google/cel-go/parser/options.go | 25 +-
.../github.com/google/cel-go/parser/parser.go | 479 +-
.../google/cel-go/parser/unparser.go | 63 +-
.../{gnostic => gnostic-models}/LICENSE | 0
.../compiler/README.md | 0
.../compiler/context.go | 0
.../compiler/error.go | 0
.../compiler/extensions.go | 2 +-
.../compiler/helpers.go | 2 +-
.../compiler/main.go | 0
.../compiler/reader.go | 0
.../extensions/README.md | 0
.../extensions/extension.pb.go | 4 +-
.../extensions/extension.proto | 0
.../extensions/extensions.go | 0
.../jsonschema/README.md | 0
.../jsonschema/base.go | 15 +-
.../jsonschema/display.go | 0
.../jsonschema/models.go | 0
.../jsonschema/operations.go | 0
.../jsonschema/reader.go | 0
.../jsonschema/schema.json | 0
.../jsonschema/writer.go | 0
.../openapiv2/OpenAPIv2.go | 2 +-
.../openapiv2/OpenAPIv2.pb.go | 4 +-
.../openapiv2/OpenAPIv2.proto | 0
.../openapiv2/README.md | 0
.../openapiv2/document.go | 2 +-
.../openapiv2/openapi-2.0.json | 0
.../openapiv3/OpenAPIv3.go | 2 +-
.../openapiv3/OpenAPIv3.pb.go | 4 +-
.../openapiv3/OpenAPIv3.proto | 0
.../openapiv3/README.md | 0
.../openapiv3/document.go | 2 +-
.../google/gnostic/openapiv3/openapi-3.0.json | 1251 -
.../google/gnostic/openapiv3/openapi-3.1.json | 1250 -
.../github.com/google/pprof/profile/encode.go | 85 +-
.../github.com/google/pprof/profile/filter.go | 4 +
.../google/pprof/profile/legacy_profile.go | 31 +-
.../github.com/google/pprof/profile/merge.go | 278 +-
.../google/pprof/profile/profile.go | 61 +-
.../github.com/google/pprof/profile/proto.go | 19 +-
.../github.com/google/pprof/profile/prune.go | 26 +-
.../azure_identity_credential_adapter.go | 118 -
.../protosanitizer/protosanitizer.go | 9 +-
.../csi-proxy/client/apiversion/version.go | 2 +-
.../client/groups/disk/v1/client_generated.go | 2 +-
.../groups/filesystem/v1/client_generated.go | 2 +-
.../groups/volume/v1/client_generated.go | 2 +-
.../mitchellh/mapstructure/CHANGELOG.md | 73 -
.../mitchellh/mapstructure/README.md | 46 -
.../mitchellh/mapstructure/decode_hooks.go | 256 -
.../mitchellh/mapstructure/error.go | 50 -
.../mitchellh/mapstructure/mapstructure.go | 1462 -
vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 29 +
vendor/github.com/onsi/ginkgo/v2/README.md | 6 +-
vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 94 +-
.../onsi/ginkgo/v2/ginkgo/outline/ginkgo.go | 4 +-
.../onsi/ginkgo/v2/internal/global/init.go | 11 +
.../onsi/ginkgo/v2/internal/group.go | 5 +-
.../onsi/ginkgo/v2/internal/node.go | 10 +-
.../onsi/ginkgo/v2/internal/suite.go | 24 +
.../onsi/ginkgo/v2/internal/writer.go | 6 +-
.../github.com/onsi/ginkgo/v2/types/config.go | 1 +
.../github.com/onsi/ginkgo/v2/types/errors.go | 4 +-
.../github.com/onsi/ginkgo/v2/types/types.go | 4 +-
.../onsi/ginkgo/v2/types/version.go | 2 +-
vendor/github.com/onsi/gomega/CHANGELOG.md | 34 +
.../github.com/onsi/gomega/format/format.go | 4 +-
vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +-
vendor/github.com/onsi/gomega/matchers.go | 6 +-
.../onsi/gomega/matchers/be_a_directory.go | 2 +-
.../onsi/gomega/matchers/be_a_regular_file.go | 2 +-
.../gomega/matchers/be_an_existing_file.go | 4 +-
.../gomega/matchers/have_http_body_matcher.go | 9 +-
vendor/github.com/pborman/uuid/time.go | 2 +-
vendor/github.com/pborman/uuid/version4.go | 2 +-
vendor/github.com/pelletier/go-toml/README.md | 6 +-
.../github.com/pelletier/go-toml/SECURITY.md | 19 +
.../github.com/pelletier/go-toml/marshal.go | 2 +-
vendor/github.com/pelletier/go-toml/parser.go | 47 +-
vendor/github.com/pelletier/go-toml/toml.go | 2 +-
.../collectors/go_collector_latest.go | 2 +
.../client_golang/prometheus/counter.go | 26 +-
.../client_golang/prometheus/desc.go | 46 +-
.../client_golang/prometheus/doc.go | 44 +-
.../client_golang/prometheus/gauge.go | 26 +-
.../prometheus/go_collector_latest.go | 7 +-
.../client_golang/prometheus/histogram.go | 61 +-
.../client_golang/prometheus/labels.go | 72 +
.../client_golang/prometheus/metric.go | 6 +-
.../client_golang/prometheus/promhttp/http.go | 19 +-
.../prometheus/promhttp/instrument_client.go | 26 +-
.../prometheus/promhttp/instrument_server.go | 101 +-
.../prometheus/promhttp/option.go | 38 +-
.../client_golang/prometheus/registry.go | 17 +-
.../client_golang/prometheus/summary.go | 39 +-
.../prometheus/testutil/promlint/promlint.go | 20 +-
.../prometheus/testutil/testutil.go | 1 +
.../client_golang/prometheus/timer.go | 28 +-
.../client_golang/prometheus/value.go | 10 +-
.../client_golang/prometheus/vec.go | 79 +-
.../client_golang/prometheus/vnext.go | 23 +
.../client_golang/prometheus/wrap.go | 8 +-
.../prometheus/client_model/go/metrics.pb.go | 1530 +-
.../prometheus/common/expfmt/decode.go | 39 +-
.../prometheus/common/expfmt/encode.go | 13 +-
.../prometheus/common/expfmt/expfmt.go | 26 +-
.../prometheus/common/expfmt/fuzz.go | 4 +-
.../common/expfmt/openmetrics_create.go | 22 +-
.../prometheus/common/expfmt/text_create.go | 3 +-
.../prometheus/common/expfmt/text_parse.go | 12 +-
.../bitbucket.org/ww/goautoneg/autoneg.go | 22 +-
.../prometheus/common/model/time.go | 89 +-
.../prometheus/common/model/value.go | 246 +-
.../prometheus/common/model/value_float.go | 100 +
.../common/model/value_histogram.go | 178 +
.../prometheus/common/model/value_type.go | 83 +
.../prometheus/procfs/Makefile.common | 23 +-
.../github.com/prometheus/procfs/cpuinfo.go | 36 +
.../prometheus/procfs/cpuinfo_loong64.go | 19 +
.../prometheus/procfs/cpuinfo_others.go | 4 +-
vendor/github.com/prometheus/procfs/doc.go | 51 +-
vendor/github.com/prometheus/procfs/fs.go | 9 +-
.../prometheus/procfs/fs_statfs_notype.go | 23 +
.../prometheus/procfs/fs_statfs_type.go | 33 +
.../prometheus/procfs/internal/util/parse.go | 15 +
.../prometheus/procfs/mountstats.go | 9 +-
.../prometheus/procfs/net_conntrackstat.go | 88 +-
.../prometheus/procfs/net_softnet.go | 75 +-
.../prometheus/procfs/net_wireless.go | 182 +
.../github.com/prometheus/procfs/netstat.go | 54 +-
vendor/github.com/prometheus/procfs/proc.go | 22 +-
.../prometheus/procfs/proc_cgroup.go | 2 +-
.../prometheus/procfs/proc_interrupts.go | 98 +
.../prometheus/procfs/proc_netstat.go | 491 +-
.../github.com/prometheus/procfs/proc_snmp.go | 318 +-
.../prometheus/procfs/proc_snmp6.go | 364 +-
.../github.com/prometheus/procfs/proc_stat.go | 10 +-
.../prometheus/procfs/proc_status.go | 38 +-
vendor/github.com/prometheus/procfs/stat.go | 22 +-
vendor/github.com/prometheus/procfs/thread.go | 80 +
vendor/github.com/prometheus/procfs/vm.go | 4 +-
vendor/golang.org/x/exp/LICENSE | 27 +
vendor/golang.org/x/exp/PATENTS | 22 +
.../x/exp/constraints/constraints.go | 50 +
vendor/golang.org/x/exp/slices/slices.go | 218 +
vendor/golang.org/x/exp/slices/sort.go | 127 +
vendor/golang.org/x/exp/slices/zsortfunc.go | 479 +
.../golang.org/x/exp/slices/zsortordered.go | 481 +
vendor/golang.org/x/sync/errgroup/errgroup.go | 132 +
vendor/golang.org/x/sync/errgroup/go120.go | 13 +
.../golang.org/x/sync/errgroup/pre_go120.go | 14 +
.../x/text/feature/plural/common.go | 70 +
.../x/text/feature/plural/message.go | 244 +
.../x/text/feature/plural/plural.go | 262 +
.../x/text/feature/plural/tables.go | 552 +
.../x/text/internal/catmsg/catmsg.go | 417 +
.../x/text/internal/catmsg/codec.go | 407 +
.../x/text/internal/catmsg/varint.go | 62 +
.../x/text/internal/format/format.go | 41 +
.../x/text/internal/format/parser.go | 358 +
vendor/golang.org/x/text/internal/internal.go | 49 +
vendor/golang.org/x/text/internal/match.go | 67 +
.../x/text/internal/number/common.go | 55 +
.../x/text/internal/number/decimal.go | 500 +
.../x/text/internal/number/format.go | 535 +
.../x/text/internal/number/number.go | 152 +
.../x/text/internal/number/pattern.go | 485 +
.../internal/number/roundingmode_string.go | 30 +
.../x/text/internal/number/tables.go | 1219 +
.../x/text/internal/stringset/set.go | 86 +
vendor/golang.org/x/text/message/catalog.go | 36 +
.../x/text/message/catalog/catalog.go | 365 +
.../golang.org/x/text/message/catalog/dict.go | 129 +
.../golang.org/x/text/message/catalog/go19.go | 15 +
.../x/text/message/catalog/gopre19.go | 23 +
vendor/golang.org/x/text/message/doc.go | 99 +
vendor/golang.org/x/text/message/format.go | 510 +
vendor/golang.org/x/text/message/message.go | 193 +
vendor/golang.org/x/text/message/print.go | 984 +
.../k8s.io/apimachinery/pkg/api/errors/OWNERS | 1 -
.../k8s.io/apimachinery/pkg/api/meta/help.go | 83 +-
.../apimachinery/pkg/api/resource/OWNERS | 1 -
.../pkg/apis/meta/v1/generated.proto | 2 -
.../apimachinery/pkg/apis/meta/v1/types.go | 22 +-
.../apis/meta/v1/unstructured/unstructured.go | 5 +
.../meta/v1/unstructured/unstructured_list.go | 9 +
.../k8s.io/apimachinery/pkg/runtime/codec.go | 1 -
.../apimachinery/pkg/runtime/interfaces.go | 5 +
.../pkg/runtime/schema/group_version.go | 2 +-
.../k8s.io/apimachinery/pkg/runtime/splice.go | 76 +
.../apimachinery/pkg/util/cache/expiring.go | 12 +-
.../k8s.io/apimachinery/pkg/util/diff/diff.go | 37 +-
.../k8s.io/apimachinery/pkg/util/dump/dump.go | 54 +
.../pkg/util/httpstream/spdy/roundtripper.go | 4 +-
.../apimachinery/pkg/util/intstr/intstr.go | 7 +-
.../managedfields/internal/fieldmanager.go | 25 +-
.../managedfields/internal/skipnonapplied.go | 14 +-
.../managedfields/internal/versioncheck.go | 52 +
.../apimachinery/pkg/util/mergepatch/util.go | 4 +-
.../k8s.io/apimachinery/pkg/util/net/util.go | 6 +
.../apimachinery/pkg/util/runtime/runtime.go | 15 +-
.../pkg/util/strategicpatch/patch.go | 63 +-
.../apimachinery/pkg/util/version/version.go | 5 +
.../k8s.io/apimachinery/pkg/util/wait/poll.go | 28 +-
.../v1alpha1/paramref.go | 27 +-
.../v1alpha1/validatingadmissionpolicyspec.go | 14 +
.../v1alpha1/variable.go | 48 +
.../v1beta1/auditannotation.go | 48 +
.../v1beta1/expressionwarning.go | 48 +
.../v1beta1/matchresources.go | 90 +
.../v1beta1/namedrulewithoperations.go | 95 +
.../v1beta1/paramkind.go | 48 +
.../admissionregistration/v1beta1/paramref.go | 71 +
.../v1beta1/typechecking.go | 44 +
.../v1beta1/validatingadmissionpolicy.go | 256 +
.../validatingadmissionpolicybinding.go | 247 +
.../validatingadmissionpolicybindingspec.go | 72 +
.../v1beta1/validatingadmissionpolicyspec.go | 117 +
.../validatingadmissionpolicystatus.go | 66 +
.../v1beta1/validation.go | 70 +
.../admissionregistration/v1beta1/variable.go | 48 +
.../v1alpha1/serverstorageversion.go | 11 +
.../applyconfigurations/batch/v1/jobspec.go | 27 +
.../applyconfigurations/batch/v1/jobstatus.go | 18 +
.../applyconfigurations/core/v1/container.go | 9 +
.../core/v1/ephemeralcontainer.go | 8 +
.../core/v1/ephemeralcontainercommon.go | 9 +
.../applyconfigurations/core/v1/hostip.go | 39 +
.../core/v1/persistentvolumeclaimstatus.go | 28 +-
.../core/v1/persistentvolumestatus.go | 16 +-
.../core/v1/podresourceclaimstatus.go | 48 +
.../applyconfigurations/core/v1/podstatus.go | 56 +-
.../extensions/v1beta1/networkpolicy.go | 11 +-
.../extensions/v1beta1/networkpolicystatus.go | 48 -
.../exemptprioritylevelconfiguration.go | 48 +
.../prioritylevelconfigurationspec.go | 9 +
.../exemptprioritylevelconfiguration.go | 48 +
.../v1beta1/prioritylevelconfigurationspec.go | 9 +
.../exemptprioritylevelconfiguration.go | 48 +
.../v1beta2/prioritylevelconfigurationspec.go | 9 +
.../exemptprioritylevelconfiguration.go | 48 +
.../v1beta3/prioritylevelconfigurationspec.go | 9 +
.../applyconfigurations/internal/internal.go | 440 +-
.../networking/v1/networkpolicy.go | 11 +-
.../networking/v1/networkpolicystatus.go | 48 -
.../discovery/cached/memory/memcache.go | 2 +-
.../client-go/discovery/discovery_client.go | 48 +-
.../v1beta1/interface.go | 14 +
.../v1beta1/validatingadmissionpolicy.go | 89 +
.../validatingadmissionpolicybinding.go | 89 +
vendor/k8s.io/client-go/informers/factory.go | 4 +-
vendor/k8s.io/client-go/informers/generic.go | 4 +
.../v1beta1/admissionregistration_client.go | 10 +
.../v1beta1/generated_expansion.go | 4 +
.../v1beta1/validatingadmissionpolicy.go | 243 +
.../validatingadmissionpolicybinding.go | 197 +
.../v1/authentication_client.go | 5 +
.../authentication/v1/generated_expansion.go | 2 +
.../authentication/v1/selfsubjectreview.go | 64 +
.../typed/extensions/v1beta1/networkpolicy.go | 48 -
.../typed/networking/v1/networkpolicy.go | 48 -
.../v1beta1/expansion_generated.go | 8 +
.../v1beta1/validatingadmissionpolicy.go | 68 +
.../validatingadmissionpolicybinding.go | 68 +
.../k8s.io/client-go/openapi/typeconverter.go | 48 +
.../plugin/pkg/client/auth/exec/exec.go | 6 +-
vendor/k8s.io/client-go/rest/config.go | 10 +-
vendor/k8s.io/client-go/rest/request.go | 28 +-
vendor/k8s.io/client-go/rest/url_utils.go | 4 +-
vendor/k8s.io/client-go/tools/cache/OWNERS | 4 +-
.../client-go/tools/cache/controller.go | 4 -
.../client-go/tools/cache/object-names.go | 65 +
.../k8s.io/client-go/tools/cache/reflector.go | 30 +-
.../client-go/tools/cache/shared_informer.go | 37 +-
vendor/k8s.io/client-go/tools/cache/store.go | 31 +-
.../client-go/tools/clientcmd/api/types.go | 14 +-
.../client-go/tools/clientcmd/loader.go | 24 +-
.../k8s.io/client-go/tools/metrics/metrics.go | 48 +
vendor/k8s.io/client-go/tools/pager/pager.go | 36 +-
vendor/k8s.io/client-go/tools/record/event.go | 2 +-
.../client-go/tools/watch/retrywatcher.go | 7 +-
vendor/k8s.io/client-go/transport/cache.go | 6 +
.../kube-openapi/pkg/builder/openapi.go | 2 +-
.../kube-openapi/pkg/builder/parameters.go | 259 +
.../k8s.io/kube-openapi/pkg/cached/cache.go | 166 +-
.../kube-openapi/pkg/handler/handler.go | 20 +-
.../kube-openapi/pkg/handler3/handler.go | 2 +-
.../kube-openapi/pkg/util/proto/document.go | 2 +-
.../pkg/util/proto/document_v3.go | 2 +-
.../pkg/validation/spec/gnostic.go | 2 +-
.../pkg/validation/strfmt/format.go | 81 -
vendor/k8s.io/utils/nsenter/nsenter.go | 27 +-
vendor/k8s.io/utils/trace/trace.go | 19 +
vendor/modules.txt | 168 +-
.../cloud-provider-azure/pkg/azclient/LICENSE | 202 +
.../pkg/azclient/Makefile | 138 +
.../pkg/azclient/arm_conf.go | 53 +
.../cloud-provider-azure/pkg/azclient/auth.go | 242 +
.../availabilitysetclient/interface.go | 30 +
.../zz_generated_client.go | 68 +
.../pkg/azclient/cloud.go | 180 +
.../pkg/azclient/deploymentclient/custom.go | 59 +
.../azclient/deploymentclient/interface.go | 34 +
.../deploymentclient/zz_generated_client.go | 50 +
.../pkg/azclient/diskclient/interface.go | 32 +
.../diskclient/zz_generated_client.go | 86 +
.../pkg/azclient/factory.go | 68 +
.../pkg/azclient/factory_conf.go | 69 +
.../pkg/azclient/factory_gen.go | 532 +
.../pkg/azclient/interfaceclient/interface.go | 36 +
.../interfaceclient/zz_generated_client.go | 89 +
.../pkg/azclient/ipgroupclient/interface.go | 32 +
.../ipgroupclient/zz_generated_client.go | 89 +
.../azclient/loadbalancerclient/interface.go | 35 +
.../loadbalancerclient/zz_generated_client.go | 89 +
.../managedclusterclient/interface.go | 32 +
.../zz_generated_client.go | 86 +
.../policy/ratelimit/flowcontrol/docs.go | 18 +
.../policy/ratelimit/flowcontrol/throttle.go | 192 +
.../azclient/policy/ratelimit/ratelimit.go | 101 +
.../policy/retryrepectthrottled/throttle.go | 88 +
.../privateendpointclient/interface.go | 30 +
.../zz_generated_client.go | 70 +
.../privatelinkserviceclient/interface.go | 32 +
.../zz_generated_client.go | 89 +
.../azclient/privatezoneclient/interface.go | 30 +
.../privatezoneclient/zz_generated_client.go | 67 +
.../publicipaddressclient/interface.go | 32 +
.../zz_generated_client.go | 89 +
.../publicipprefixclient/interface.go | 32 +
.../zz_generated_client.go | 89 +
.../azclient/resourcegroupclient/custom.go | 58 +
.../azclient/resourcegroupclient/interface.go | 32 +
.../zz_generated_client.go | 42 +
.../azclient/routetableclient/interface.go | 30 +
.../routetableclient/zz_generated_client.go | 62 +
.../azclient/securitygroupclient/interface.go | 32 +
.../zz_generated_client.go | 86 +
.../pkg/azclient/snapshotclient/custom.go | 36 +
.../pkg/azclient/snapshotclient/interface.go | 32 +
.../snapshotclient/zz_generated_client.go | 73 +
.../sshpublickeyresourceclient/custom.go | 53 +
.../sshpublickeyresourceclient/interface.go | 37 +
.../zz_generated_client.go | 68 +
.../pkg/azclient/subnetclient/interface.go | 32 +
.../subnetclient/zz_generated_client.go | 89 +
.../azclient/utils/armbalancer/armbalancer.go | 66 +
.../armbalancer/host_scoped_transport.go | 63 +
.../azclient/utils/armbalancer/transport.go | 36 +
.../utils/armbalancer/transport_pool.go | 160 +
.../pkg/azclient/utils/const.go | 24 +
.../pkg/azclient/utils/interface.go | 69 +
.../pkg/azclient/utils/options.go | 58 +
.../pkg/azclient/utils/policy_wrapper.go | 33 +
.../pkg/azclient/utils/poller.go | 55 +
.../pkg/azclient/utils/transport.go | 48 +
.../azclient/virtualmachineclient/custom.go | 39 +
.../virtualmachineclient/interface.go | 32 +
.../zz_generated_client.go | 75 +
.../virtualmachinescalesetclient/interface.go | 32 +
.../zz_generated_client.go | 86 +
.../virtualmachinescalesetvmclient/custom.go | 99 +
.../interface.go | 35 +
.../zz_generated_client.go | 61 +
.../virtualnetworkclient/interface.go | 32 +
.../zz_generated_client.go | 89 +
1014 files changed, 236216 insertions(+), 97964 deletions(-)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/doc.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_identifier.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_type.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/CHANGELOG.md
rename vendor/github.com/{mitchellh/mapstructure/LICENSE => Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/LICENSE.txt} (85%)
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/README.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/assets.json
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/autorest.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/availabilitysets_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/build.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservationgroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/capacityreservations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/ci.yml
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/client_factory.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceoperatingsystems_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroleinstances_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudserviceroles_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudservices_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/cloudservicesupdatedomain_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleries_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimages_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/communitygalleryimageversions_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhostgroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/dedicatedhosts_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskaccesses_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskencryptionsets_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/diskrestorepoint_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/disks_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleries_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplications_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryapplicationversions_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimages_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/galleryimageversions_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/gallerysharingprofile_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/images_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/loganalytics_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/models_serde.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/operations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/options.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/proximityplacementgroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/resourceskus_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/response_types.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepointcollections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/restorepoints_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleries_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimages_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sharedgalleryimageversions_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/snapshots_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/sshpublickeys_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/time_rfc3339.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/usage_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensionimages_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineextensions_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimages_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineimagesedgezone_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachineruncommands_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachines_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetextensions_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetrollingupgrades_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesets_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmextensions_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvmruncommands_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinescalesetvms_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/virtualmachinesizes_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/CHANGELOG.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/LICENSE.txt
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/README.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/agentpools_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/assets.json
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/autorest.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/build.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/ci.yml
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/client_factory.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/date_type.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/maintenanceconfigurations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/managedclusters_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/models_serde.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/operations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/options.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privateendpointconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/privatelinkresources_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/resolveprivatelinkserviceid_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/response_types.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/snapshots_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4/time_rfc3339.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/CHANGELOG.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/LICENSE.txt
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/README.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrulecollections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/adminrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivateendpointconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewayprivatelinkresources_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgateways_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifests_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationgatewaywafdynamicmanifestsdefault_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/applicationsecuritygroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/assets.json
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/autorest.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availabledelegations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableendpointservices_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableprivateendpointtypes_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableresourcegroupdelegations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/availableservicealiases_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewallfqdntags_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/azurefirewalls_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bastionhosts_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/bgpservicecommunities_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/build.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ci.yml
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/client_factory.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/configurationpolicygroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectionmonitors_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/connectivityconfigurations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/customipprefixes_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddoscustompolicies_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ddosprotectionplans_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/defaultsecurityrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/dscpconfiguration_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitauthorizations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuitpeerings_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecircuits_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnectionpeerings_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutecrossconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutegateways_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressroutelinks_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportauthorizations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteports_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteportslocations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteproviderportslocation_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/expressrouteserviceproviders_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicies_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignatures_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesfiltervalues_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyidpssignaturesoverrides_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/firewallpolicyrulecollectiongroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/flowlogs_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/groups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubroutetables_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/hubvirtualnetworkconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundnatrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/inboundsecurityrule_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceipconfigurations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaceloadbalancers_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaces.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfaces_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/interfacetapconfigurations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipallocations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/ipgroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerbackendaddresspools_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerfrontendipconfigurations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerloadbalancingrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancernetworkinterfaces_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalanceroutboundrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancerprobes_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/loadbalancers_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/localnetworkgateways_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/management_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managementgroupnetworkmanagerconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managercommits_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managerdeploymentstatus_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/managers_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/models_serde.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natgateways_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/natrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/operations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/options.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/p2svpngateways_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/packetcaptures_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/peerexpressroutecircuitconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/polymorphic_helpers.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatednszonegroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privateendpoints_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/privatelinkservices_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/profiles_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipaddresses_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/publicipprefixes_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/resourcenavigationlinks_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/response_types.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilterrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routefilters_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routemaps_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routes_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routetables_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/routingintent_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/scopeconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityadminconfigurations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitygroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securitypartnerproviders_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/securityrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceassociationlinks_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicies_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/serviceendpointpolicydefinitions_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetaginformation_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/servicetags_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/staticmembers_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subnets_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/subscriptionnetworkmanagerconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/time_rfc3339.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/usages_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vipswap_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliances_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualappliancesites_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualapplianceskus_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnection_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubbgpconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubipconfiguration_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubroutetablev2s_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualhubs_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewayconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgatewaynatrules_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkgateways_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworkpeerings_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworks_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualnetworktaps_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouterpeerings_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualrouters_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/virtualwans_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpngateways_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnlinkconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnserverconfigurationsassociatedwithvirtualwan_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinkconnections_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitelinks_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsites_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/vpnsitesconfiguration_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/watchers_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webapplicationfirewallpolicies_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/webcategories_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/CHANGELOG.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/LICENSE.txt
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/README.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/assets.json
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/autorest.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/build.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/ci.yml
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/client_factory.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/models_serde.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/privatezones_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/recordsets_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/response_types.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns/virtualnetworklinks_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/CHANGELOG.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/LICENSE.txt
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/README.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/assets.json
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/autorest.md
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/build.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/ci.yml
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/client_factory.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/constants.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/deploymentoperations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/deployments_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/models.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/models_serde.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/operations_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/providerresourcetypes_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/providers_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/resourcegroups_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/response_types.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/tags_client.go
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources/time_rfc3339.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/CHANGELOG.md
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/_meta.json
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/applicationgateways.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/applicationsecuritygroups.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/availabledelegations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/availableendpointservices.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/availableprivateendpointtypes.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/availableresourcegroupdelegations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/azurefirewallfqdntags.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/azurefirewalls.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/bastionhosts.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/bgpservicecommunities.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/connectionmonitors.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/ddoscustompolicies.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/ddosprotectionplans.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/defaultsecurityrules.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/enums.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressroutecircuitauthorizations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressroutecircuitconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressroutecircuitpeerings.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressroutecircuits.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressrouteconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressroutecrossconnectionpeerings.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressroutecrossconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressroutegateways.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressroutelinks.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressrouteports.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressrouteportslocations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/expressrouteserviceproviders.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicies.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/firewallpolicyrulegroups.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/hubvirtualnetworkconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/inboundnatrules.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/interfaceipconfigurations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/interfaceloadbalancers.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/interfacesgroup.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/interfacetapconfigurations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/loadbalancerbackendaddresspools.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/loadbalancerfrontendipconfigurations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/loadbalancerloadbalancingrules.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/loadbalancernetworkinterfaces.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/loadbalanceroutboundrules.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/loadbalancerprobes.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/loadbalancers.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/localnetworkgateways.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/models.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/natgateways.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/operations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/p2svpngateways.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/p2svpnserverconfigurations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/packetcaptures.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/peerexpressroutecircuitconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/privateendpoints.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/privatelinkservices.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/profiles.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/publicipaddresses.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/publicipprefixes.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/resourcenavigationlinks.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/routefilterrules.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/routefilters.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/routes.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/routetables.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/securitygroups.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/securityrules.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/serviceassociationlinks.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/serviceendpointpolicies.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/serviceendpointpolicydefinitions.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/servicetags.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/subnets.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/usages.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/version.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualhubs.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgatewayconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkgateways.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworkpeerings.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworks.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualnetworktaps.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/virtualwans.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/vpnconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/vpngateways.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/vpnlinkconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/vpnsitelinkconnections.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/vpnsitelinks.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/vpnsites.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/vpnsitesconfiguration.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/watchers.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/webapplicationfirewallpolicies.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/CHANGELOG.md
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/_meta.json
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/client.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/deploymentoperations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/deployments.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/enums.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/groups.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/models.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/operations.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/providers.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/resources.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/tags.go
delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/version.go
delete mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json
create mode 100644 vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/LICENSE (100%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/atn.go (72%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/atn_config.go (84%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/atn_config_set.go (81%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/atn_deserialization_options.go (96%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/atn_deserializer.go (99%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/atn_simulator.go (94%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/atn_state.go (97%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/atn_type.go (79%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/char_stream.go (82%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/common_token_factory.go (96%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/common_token_stream.go (98%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/dfa.go (80%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/dfa_serializer.go (97%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/dfa_state.go (90%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/diagnostic_error_listener.go (98%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/error_listener.go (98%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/error_strategy.go (99%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/errors.go (98%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/file_stream.go (92%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/input_stream.go (96%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/int_stream.go (82%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/interval_set.go (98%)
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/lexer.go (98%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/lexer_action.go (91%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/lexer_action_executor.go (88%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/lexer_atn_simulator.go (98%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/ll1_analyzer.go (87%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/parser.go (99%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/parser_atn_simulator.go (94%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/parser_rule_context.go (98%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/prediction_context.go (81%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/prediction_mode.go (95%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/recognizer.go (92%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/rule_context.go (97%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/semantic_context.go (85%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/token.go (98%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/token_source.go (85%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/token_stream.go (87%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/tokenstream_rewriter.go (58%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/trace_listener.go (93%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/transition.go (99%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/tree.go (98%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/trees.go (93%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/utils.go (94%)
rename vendor/github.com/antlr/antlr4/runtime/Go/antlr/{ => v4}/utils_set.go (80%)
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/.gitignore
rename vendor/github.com/{jongio/azidext/go/azidext => golang-jwt/jwt/v5}/LICENSE (91%)
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/README.md
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/claims.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/doc.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ed25519.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/hmac.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/map_claims.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/none.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/parser.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/parser_option.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/rsa.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/signing_method.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/token.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/token_option.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/types.go
create mode 100644 vendor/github.com/golang-jwt/jwt/v5/validator.go
create mode 100644 vendor/github.com/google/cel-go/common/types/optional.go
rename vendor/github.com/google/cel-go/{interpreter/coster.go => common/types/traits/zeroer.go} (50%)
create mode 100644 vendor/github.com/google/cel-go/ext/bindings.go
create mode 100644 vendor/github.com/google/cel-go/ext/math.go
create mode 100644 vendor/github.com/google/cel-go/ext/native.go
create mode 100644 vendor/github.com/google/cel-go/ext/protos.go
create mode 100644 vendor/github.com/google/cel-go/ext/sets.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/formatting.go
rename vendor/github.com/google/{gnostic => gnostic-models}/LICENSE (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/README.md (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/context.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/error.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/extensions.go (97%)
rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/helpers.go (99%)
rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/main.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/compiler/reader.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/README.md (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extension.pb.go (99%)
rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extension.proto (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/extensions/extensions.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/README.md (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/base.go (90%)
rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/display.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/models.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/operations.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/reader.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/schema.json (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/jsonschema/writer.go (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.go (99%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.pb.go (99%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/OpenAPIv2.proto (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/README.md (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/document.go (96%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv2/openapi-2.0.json (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.go (99%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.pb.go (99%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/OpenAPIv3.proto (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/README.md (100%)
rename vendor/github.com/google/{gnostic => gnostic-models}/openapiv3/document.go (96%)
delete mode 100644 vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json
delete mode 100644 vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json
delete mode 100644 vendor/github.com/jongio/azidext/go/azidext/azure_identity_credential_adapter.go
delete mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md
delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go
delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go
delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go
create mode 100644 vendor/github.com/pelletier/go-toml/SECURITY.md
create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vnext.go
create mode 100644 vendor/github.com/prometheus/common/model/value_float.go
create mode 100644 vendor/github.com/prometheus/common/model/value_histogram.go
create mode 100644 vendor/github.com/prometheus/common/model/value_type.go
create mode 100644 vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_notype.go
create mode 100644 vendor/github.com/prometheus/procfs/fs_statfs_type.go
create mode 100644 vendor/github.com/prometheus/procfs/net_wireless.go
create mode 100644 vendor/github.com/prometheus/procfs/proc_interrupts.go
create mode 100644 vendor/github.com/prometheus/procfs/thread.go
create mode 100644 vendor/golang.org/x/exp/LICENSE
create mode 100644 vendor/golang.org/x/exp/PATENTS
create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go
create mode 100644 vendor/golang.org/x/exp/slices/slices.go
create mode 100644 vendor/golang.org/x/exp/slices/sort.go
create mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go
create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go
create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go
create mode 100644 vendor/golang.org/x/sync/errgroup/go120.go
create mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go
create mode 100644 vendor/golang.org/x/text/feature/plural/common.go
create mode 100644 vendor/golang.org/x/text/feature/plural/message.go
create mode 100644 vendor/golang.org/x/text/feature/plural/plural.go
create mode 100644 vendor/golang.org/x/text/feature/plural/tables.go
create mode 100644 vendor/golang.org/x/text/internal/catmsg/catmsg.go
create mode 100644 vendor/golang.org/x/text/internal/catmsg/codec.go
create mode 100644 vendor/golang.org/x/text/internal/catmsg/varint.go
create mode 100644 vendor/golang.org/x/text/internal/format/format.go
create mode 100644 vendor/golang.org/x/text/internal/format/parser.go
create mode 100644 vendor/golang.org/x/text/internal/internal.go
create mode 100644 vendor/golang.org/x/text/internal/match.go
create mode 100644 vendor/golang.org/x/text/internal/number/common.go
create mode 100644 vendor/golang.org/x/text/internal/number/decimal.go
create mode 100644 vendor/golang.org/x/text/internal/number/format.go
create mode 100644 vendor/golang.org/x/text/internal/number/number.go
create mode 100644 vendor/golang.org/x/text/internal/number/pattern.go
create mode 100644 vendor/golang.org/x/text/internal/number/roundingmode_string.go
create mode 100644 vendor/golang.org/x/text/internal/number/tables.go
create mode 100644 vendor/golang.org/x/text/internal/stringset/set.go
create mode 100644 vendor/golang.org/x/text/message/catalog.go
create mode 100644 vendor/golang.org/x/text/message/catalog/catalog.go
create mode 100644 vendor/golang.org/x/text/message/catalog/dict.go
create mode 100644 vendor/golang.org/x/text/message/catalog/go19.go
create mode 100644 vendor/golang.org/x/text/message/catalog/gopre19.go
create mode 100644 vendor/golang.org/x/text/message/doc.go
create mode 100644 vendor/golang.org/x/text/message/format.go
create mode 100644 vendor/golang.org/x/text/message/message.go
create mode 100644 vendor/golang.org/x/text/message/print.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/splice.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/dump/dump.go
create mode 100644 vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
create mode 100644 vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go
create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go
create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go
create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
create mode 100644 vendor/k8s.io/client-go/openapi/typeconverter.go
create mode 100644 vendor/k8s.io/client-go/tools/cache/object-names.go
create mode 100644 vendor/k8s.io/kube-openapi/pkg/builder/parameters.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/LICENSE
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/Makefile
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/custom.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_conf.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol/docs.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol/throttle.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/ratelimit.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryrepectthrottled/throttle.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/custom.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/custom.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/custom.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/armbalancer.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/host_scoped_transport.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/transport.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/transport_pool.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/policy_wrapper.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/poller.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/custom.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/custom.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/zz_generated_client.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/interface.go
create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/zz_generated_client.go
diff --git a/go.mod b/go.mod
index cbca2955d3..353986e1d3 100644
--- a/go.mod
+++ b/go.mod
@@ -4,58 +4,63 @@ go 1.20
require (
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
- github.com/Azure/go-autorest/autorest v0.11.29
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.2.0
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1
+ github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0
- github.com/container-storage-interface/spec v1.7.0
+ github.com/container-storage-interface/spec v1.8.0
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.4
github.com/google/gofuzz v1.2.0 // indirect
github.com/imdario/mergo v0.3.11 // indirect
- github.com/kubernetes-csi/csi-lib-utils v0.13.0
- github.com/kubernetes-csi/csi-proxy/client v1.0.1
- github.com/kubernetes-csi/external-snapshotter/client/v4 v4.1.0
- github.com/onsi/gomega v1.27.8
- github.com/pborman/uuid v1.2.0
- github.com/pelletier/go-toml v1.9.4
+ github.com/kubernetes-csi/csi-lib-utils v0.15.0
+ github.com/kubernetes-csi/csi-proxy/client v1.1.3
+ github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
+ github.com/onsi/ginkgo/v2 v2.13.0
+ github.com/onsi/gomega v1.28.0
+ github.com/pborman/uuid v1.2.1
+ github.com/pelletier/go-toml v1.9.5
github.com/stretchr/testify v1.8.4
golang.org/x/net v0.24.0
google.golang.org/grpc v1.59.0
google.golang.org/protobuf v1.33.0
- k8s.io/api v0.27.13
- k8s.io/apimachinery v0.27.13
+ k8s.io/api v0.28.0
+ k8s.io/apimachinery v0.28.2
k8s.io/cloud-provider v0.27.13
- k8s.io/component-base v0.27.13
+ k8s.io/component-base v0.28.0
k8s.io/klog/v2 v2.100.1
k8s.io/kubernetes v1.27.13
k8s.io/mount-utils v0.27.6
- k8s.io/utils v0.0.0-20230209194617-a36077c30491
+ k8s.io/utils v0.0.0-20230505201702-9f6742963106
sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230629023444-76504759ed59
+ sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231012171618-1890d8703623
sigs.k8s.io/yaml v1.3.0
)
require (
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
- github.com/jongio/azidext/go/azidext v0.4.0
- github.com/onsi/ginkgo/v2 v2.11.0
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect
golang.org/x/sys v0.19.0
- k8s.io/client-go v0.27.13
+ k8s.io/client-go v0.28.0
k8s.io/pod-security-admission v0.27.13
)
require (
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.3.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.1.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
- github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -65,23 +70,24 @@ require (
github.com/coreos/go-systemd/v22 v22.4.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
- github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
- github.com/go-openapi/jsonreference v0.20.1 // indirect
- github.com/go-openapi/swag v0.22.3 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
+ github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/google/cel-go v0.12.7 // indirect
- github.com/google/gnostic v0.5.7-v3refs // indirect
+ github.com/google/cel-go v0.16.1 // indirect
+ github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
- github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
+ github.com/google/pprof v0.0.0-20230602010524-ada837c32108 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
@@ -90,8 +96,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
- github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
- github.com/mitchellh/mapstructure v1.4.1 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -102,10 +107,10 @@ require (
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_golang v1.14.0 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/procfs v0.8.0 // indirect
+ github.com/prometheus/client_golang v1.16.0 // indirect
+ github.com/prometheus/client_model v0.4.0 // indirect
+ github.com/prometheus/common v0.44.0 // indirect
+ github.com/prometheus/procfs v0.10.1 // indirect
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
@@ -125,6 +130,7 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
+ golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/term v0.19.0 // indirect
@@ -144,7 +150,7 @@ require (
k8s.io/component-helpers v0.27.13 // indirect
k8s.io/controller-manager v0.27.13 // indirect
k8s.io/kms v0.27.13 // indirect
- k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
+ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/kubectl v0.0.0 // indirect
k8s.io/kubelet v0.27.13 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
@@ -157,7 +163,7 @@ replace (
go.etcd.io/etcd => go.etcd.io/etcd v0.0.0-20200410171415-59f5fb25a533
k8s.io/api => k8s.io/api v0.27.13
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.13
- k8s.io/apimachinery => k8s.io/apimachinery v0.27.13
+ k8s.io/apimachinery => k8s.io/apimachinery v0.28.2
k8s.io/apiserver => k8s.io/apiserver v0.27.13
k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.13
k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.13
diff --git a/go.sum b/go.sum
index 9d3dea2b42..60eb513fa4 100644
--- a/go.sum
+++ b/go.sum
@@ -5,44 +5,35 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.110.9 h1:e7ITSqGFFk4rbz/JFIqZh3G4VEHguhAL4BQcFlWtU68=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.23.2 h1:nWEMDhgbBkBJjfpVySqU4jgWdc22PLR0o4vEexZHers=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 h1:sVW/AFBTGyJxDaMYlq0ct3jUXTtj12tQ6zE2GZUgVQw=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 h1:Oj853U9kG+RLTCQXpjvOnrv0WaZHxgmZz1TlLywgOPY=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.2.0 h1:PutmjTnIYf/rM5OlNGpAXcL+b2Fa2ErD5IsOjXEHYyg=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.2.0/go.mod h1:c3iwOnL5Xq5K9ZOvxBrfZYD4pBDNTGK5b7ptkHN6SDs=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.3.0 h1:U73ZEM5QTwb7x/VrXLTi+sb6Aw9DqFJxOpWuj+pDPfk=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.3.0/go.mod h1:WpiaNrHqgIy+P5gTYbOA/JuMmxq7uq8onUvVBybjIlI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0 h1:iGj7n4SmssnseLryJRs/0lb4Db129ioYOCPSPC+vEsw=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0/go.mod h1:qeBrdANBgW4QsU1bF5/9qjrPRwFIt+AnOMxyH5Bwkhk=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.1.0 h1:rR8ZW79lE/ppfXTfiYSnMFv5EzmVuY4pfZWIkscIJ64=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.1.0/go.mod h1:y2zXtLSMM/X5Mfawq0lOftpWn3f4V6OCsRdINsvWBPI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
@@ -74,8 +65,8 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM=
-github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -85,21 +76,14 @@ github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOp
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves=
-github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
+github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
@@ -107,18 +91,16 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
-github.com/container-storage-interface/spec v1.7.0 h1:gW8eyFQUZWWrMWa8p1seJ28gwDoN5CVJ4uAbQ+Hdycw=
-github.com/container-storage-interface/spec v1.7.0/go.mod h1:JYuzLqr9VVNoDJl44xp/8fmCOvWPDKzuGTwCoklhuqk=
+github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI=
+github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
@@ -129,17 +111,18 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
+github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -152,22 +135,13 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -175,44 +149,39 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
+github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
+github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -229,18 +198,16 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/cel-go v0.12.7 h1:jM6p55R0MKBg79hZjn1zs2OlrywZ1Vk00rxVvad1/O0=
-github.com/google/cel-go v0.12.7/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
+github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
+github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -251,17 +218,13 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20230602010524-ada837c32108 h1:y+JfwMOPwQwIrnh3TUPwwtOAhONoppkHiSa4sQBoK2k=
+github.com/google/pprof v0.0.0-20230602010524-ada837c32108/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -291,53 +254,40 @@ github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jongio/azidext/go/azidext v0.4.0 h1:TOYyVFMeWGgXNhURSgrEtUCu7JAAKgsy+5C4+AEfYlw=
-github.com/jongio/azidext/go/azidext v0.4.0/go.mod h1:VrlpGde5B+pPbTUxnThE5UIQQkcebdr3jrC2MmlMVSI=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubernetes-csi/csi-lib-utils v0.13.0 h1:QrTdZVZbHlaSUBN9ReayBPnnF1N0edFIpUKBwVIBW3w=
-github.com/kubernetes-csi/csi-lib-utils v0.13.0/go.mod h1:JS9eDIZmSjx4F9o0bLTVK/qfhIIOifdjEfVXzxWapfE=
-github.com/kubernetes-csi/csi-proxy/client v1.0.1 h1:BPK9e5Fy0GcDRjDc9hqu7TnouSRujG6IvbH+PXSDOsY=
-github.com/kubernetes-csi/csi-proxy/client v1.0.1/go.mod h1:URLOkEbRhOwKVvGvug6HSKRTpLSFuQ/Gt3xahDag8qc=
-github.com/kubernetes-csi/external-snapshotter/client/v4 v4.1.0 h1:DecASDOSUnp0ftwi4aU87joEpZfLv9iMPwNYzrGb9Lc=
-github.com/kubernetes-csi/external-snapshotter/client/v4 v4.1.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
+github.com/kubernetes-csi/csi-lib-utils v0.15.0 h1:YTMO6WilRUmjGh5/73kF4KjNcXev+V37O4bx8Uoxy5A=
+github.com/kubernetes-csi/csi-lib-utils v0.15.0/go.mod h1:fsoR7g1fOfl1z0WDpA1WvWPtt4oVvgzChgSUgR3JWDw=
+github.com/kubernetes-csi/csi-proxy/client v1.1.3 h1:FdGU7NtxGhQX2wTfnuscmThG920hq0OaVVpuJW9t2k0=
+github.com/kubernetes-csi/csi-proxy/client v1.1.3/go.mod h1:SfK4HVKQdMH5KrffivddAWgX5hl3P5KmnuOTBbDNboU=
+github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=
+github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM=
-github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
@@ -345,15 +295,11 @@ github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGp
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -370,8 +316,10 @@ github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1L
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
-github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
-github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
+github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
+github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
+github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
@@ -385,60 +333,43 @@ github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdM
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
-github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
-github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c=
+github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
-github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
-github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw=
+github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
+github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
-github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
-github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
+github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
@@ -467,9 +398,7 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -488,8 +417,6 @@ go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
@@ -516,7 +443,6 @@ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -527,6 +453,7 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
@@ -537,12 +464,9 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -553,14 +477,11 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -570,12 +491,12 @@ golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -583,33 +504,17 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@@ -621,7 +526,9 @@ golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
@@ -633,8 +540,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -643,10 +548,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -656,41 +558,22 @@ golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -698,12 +581,10 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -715,7 +596,9 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -730,14 +613,15 @@ golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -750,6 +634,7 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
@@ -776,33 +661,11 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
@@ -812,6 +675,7 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
@@ -824,24 +688,12 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -852,27 +704,8 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U=
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4=
@@ -887,11 +720,7 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -908,14 +737,15 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/dnaeon/go-vcr.v3 v3.1.2 h1:F1smfXBqQqwpVifDfUBQG6zzaGjzT+EnVZakrOdr5wA=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
@@ -923,10 +753,8 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
@@ -941,19 +769,17 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.27.13 h1:d49LYs1dh+JMMDNYQSu8FhEzCjc2TNpYvDWoSGAKs80=
k8s.io/api v0.27.13/go.mod h1:W3lYMPs34i0XQA+cmKfejve+HwbRZjy67fL05RyJUTo=
k8s.io/apiextensions-apiserver v0.27.13 h1:it32SCkrjzhimZasL++nsshG66m2O570y56R+xj1/WE=
k8s.io/apiextensions-apiserver v0.27.13/go.mod h1:LkAz0+pjqr/92kPigX/B2sjsPhGCuG+hi8GyyjUNNsE=
-k8s.io/apimachinery v0.27.13 h1:xDAnOWaRVNSkaKdfB0Ab11hixH90KGTbLwEHMloMjFM=
-k8s.io/apimachinery v0.27.13/go.mod h1:TWo+8wOIz3CytsrlI9k/LBWXLRr9dqf5hRSCbbggMAg=
+k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ=
+k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU=
k8s.io/apiserver v0.27.13 h1:Yf69zVdbuQVIMpz7N4dtntWsUklKpcFXGAdVh7vKOH4=
k8s.io/apiserver v0.27.13/go.mod h1:XHth2MKAUdcLvdhPOwvDPbSyOrMev2vRqE05oUEC5Hk=
k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
-k8s.io/client-go v0.27.13 h1:SfUbIukb6BSqaadlYRX0AzMoN6+e+9FZGEKqfisidho=
-k8s.io/client-go v0.27.13/go.mod h1:I9SBaI28r6ii465Fb0dTpf5O3adOnDwNBoeqlDNbbFg=
+k8s.io/client-go v0.28.0 h1:ebcPRDZsCjpj62+cMk1eGNX1QkMdRmQ6lmz5BLoFWeM=
+k8s.io/client-go v0.28.0/go.mod h1:0Asy9Xt3U98RypWJmU1ZrRAGKhP6NqDPmptlAzK2kMc=
k8s.io/cloud-provider v0.27.13 h1:iOLPffa1PjCE9SpHIVrMfRQv8bBER+LGjJ3nbl69xkA=
k8s.io/cloud-provider v0.27.13/go.mod h1:Z6hm5I+K/1yQK4myP/PdP0NorxfM9p8iPEhyGvznw0I=
k8s.io/code-generator v0.27.13/go.mod h1:NmuMGweDQC7Ewx+c8zgbtVPLsy5r5Rs/+nQ7kuBwNbI=
@@ -974,8 +800,9 @@ k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kms v0.27.13 h1:mm0njb9VaDh2TNanqndUWGpe4c2elQbkPofoGPOalvM=
k8s.io/kms v0.27.13/go.mod h1:g3rvv+YDJ5xVEanhlnliJw4a9vZ/SL/AVw0/yQNwbEA=
-k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/kubectl v0.27.13 h1:RuygTTIy/FFotjI5kBA/DRamU9kSbCjUVlqd8Ni/U94=
k8s.io/kubectl v0.27.13/go.mod h1:wAoY7J5QWMX6xzeQ/rw9kZpadB4zUVePu1WzGv1GBsw=
k8s.io/kubelet v0.27.13 h1:tQ4bG3gfValJZDkVBCrnWrLbBRRlE3rA8gtC9LgTAag=
@@ -988,15 +815,17 @@ k8s.io/pod-security-admission v0.27.13 h1:B3sSlxhQZiPuEj6dD8Mc1oyjWR3/nUVZ9qXTvD
k8s.io/pod-security-admission v0.27.13/go.mod h1:d0ms/UYCmD0O28MA0MngozhKouM5xsKXFav4VGOpLr8=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU=
+k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230629023444-76504759ed59 h1:qWDatgK43rjmRGZ+VlDRE37+6IhnerXS5ncRvwTI7II=
sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230629023444-76504759ed59/go.mod h1:loPr/85Nm8kXVIh4OZgkJvmKMr+CzrQhbcAqOD3mxfk=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231012171618-1890d8703623 h1:VM3SZMnMN5Tabj3+TyI7P/5uIiz5B2Xyknqy7NgoS7k=
+sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231012171618-1890d8703623/go.mod h1:0wVQ8C8BgwINQnjLDwRB3YniyIpI3shS7Vvx2hcmDLg=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
diff --git a/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go b/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go
index 19330c3da0..af50f27ef0 100644
--- a/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go
+++ b/test/e2e/testsuites/dynamically_provisioned_azuredisk_detach_tester.go
@@ -21,7 +21,7 @@ import (
"fmt"
"time"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
+ compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@@ -86,7 +86,7 @@ func (t *DynamicallyProvisionedAzureDiskDetach) Run(ctx context.Context, client
framework.ExpectNoError(err, fmt.Sprintf("Error getting client for azuredisk %v", err))
disktest, err := disksClient.Get(ctx, resourceGroup, diskName)
framework.ExpectNoError(err, fmt.Sprintf("Error getting disk for azuredisk %v", err))
- framework.ExpectEqual(compute.Attached, disktest.DiskState)
+ framework.ExpectEqual(compute.DiskStateAttached, *disktest.Properties.DiskState)
ginkgo.By("begin to delete the pod")
tpod.Cleanup(ctx)
@@ -96,10 +96,10 @@ func (t *DynamicallyProvisionedAzureDiskDetach) Run(ctx context.Context, client
if err != nil {
return false, fmt.Errorf("Error getting disk for azuredisk %v", err)
}
- if disktest.DiskState == compute.Unattached {
+ if *disktest.Properties.DiskState == compute.DiskStateUnattached {
return true, nil
}
- ginkgo.By(fmt.Sprintf("current disk state(%v) is not in unattached state, wait and recheck", disktest.DiskState))
+ ginkgo.By(fmt.Sprintf("current disk state(%v) is not in unattached state, wait and recheck", *disktest.Properties.DiskState))
return false, nil
})
framework.ExpectNoError(err, fmt.Sprintf("waiting for disk detach complete returned with error: %v", err))
diff --git a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go
index 36a99c4345..420ae39fc6 100644
--- a/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go
+++ b/test/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go
@@ -157,7 +157,7 @@ func (t *DynamicallyProvisionedResizeVolumeTest) Run(ctx context.Context, client
framework.ExpectNoError(err, fmt.Sprintf("Error getting client for azuredisk %v", err))
disktest, err := disksClient.Get(ctx, resourceGroup, diskName)
framework.ExpectNoError(err, fmt.Sprintf("Error getting disk for azuredisk %v", err))
- newdiskSize := strconv.Itoa(int(*disktest.DiskSizeGB)) + "Gi"
+ newdiskSize := strconv.Itoa(int(*disktest.Properties.DiskSizeGB)) + "Gi"
if !(newSize.String() == newdiskSize) {
framework.Failf("newPVCSize(%+v) is not equal to new azurediskSize(%+v)", newSize.String(), newdiskSize)
}
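Reviewer note: in the track2 armcompute models used above, disk attributes such as DiskState and DiskSizeGB now live under a Properties struct and are pointers, which is why both testers dereference *disktest.Properties.<field>. A minimal sketch of that access pattern follows; readDiskState is a hypothetical helper written only for illustration, not part of this change, and simply guards the pointer chain before dereferencing.

package main

import (
	"fmt"

	compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
)

// readDiskState (hypothetical) shows the track2 access pattern: fields hang off
// Properties and are pointers, so check the chain before dereferencing.
func readDiskState(disk *compute.Disk) (compute.DiskState, int32, error) {
	if disk == nil || disk.Properties == nil || disk.Properties.DiskState == nil || disk.Properties.DiskSizeGB == nil {
		return "", 0, fmt.Errorf("disk or its properties are not populated")
	}
	return *disk.Properties.DiskState, *disk.Properties.DiskSizeGB, nil
}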
diff --git a/test/utils/azure/azure_helpers.go b/test/utils/azure/azure_helpers.go
index d0b0a17a0e..96ecbad349 100644
--- a/test/utils/azure/azure_helpers.go
+++ b/test/utils/azure/azure_helpers.go
@@ -19,59 +19,81 @@ package azure
import (
"context"
"fmt"
- "log"
"os"
- "strings"
"time"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
- "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute"
- "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
- "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources"
- "github.com/Azure/go-autorest/autorest/azure"
- "github.com/jongio/azidext/go/azidext"
- "k8s.io/utils/pointer"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+ compute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+ network "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+ resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient"
)
type Client struct {
- environment azure.Environment
- subscriptionID string
- groupsClient resources.GroupsClient
- vmClient compute.VirtualMachinesClient
- nicClient network.InterfacesClient
- subnetsClient network.SubnetsClient
- vnetClient network.VirtualNetworksClient
- disksClient compute.DisksClient
- sshPublicKeysClient compute.SSHPublicKeysClient
+ groupsClient resourcegroupclient.Interface
+ vmClient virtualmachineclient.Interface
+ nicClient interfaceclient.Interface
+ subnetsClient subnetclient.Interface
+ vnetClient virtualnetworkclient.Interface
+ disksClient diskclient.Interface
+ sshPublicKeysClient sshpublickeyresourceclient.Interface
}
func GetAzureClient(cloud, subscriptionID, clientID, tenantID, clientSecret string) (*Client, error) {
- env, err := azure.EnvironmentFromName(cloud)
+ armConfig := &azclient.ARMClientConfig{
+ Cloud: cloud,
+ }
+ cloudConfig, err := azclient.GetAzureCloudConfig(armConfig)
if err != nil {
return nil, err
}
-
- options := azidentity.ClientSecretCredentialOptions{
- ClientOptions: azcore.ClientOptions{
- Cloud: getCloudConfig(env),
+ credProvider, err := azclient.NewAuthProvider(azclient.AzureAuthConfig{
+ TenantID: tenantID,
+ AADClientID: clientID,
+ AADClientSecret: clientSecret,
+ }, &arm.ClientOptions{
+ AuxiliaryTenants: []string{tenantID},
+ ClientOptions: policy.ClientOptions{
+ Cloud: *cloudConfig,
},
+ })
+ if err != nil {
+ return nil, err
}
- cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, clientSecret, &options)
+ cred, err := credProvider.GetAzIdentity()
if err != nil {
return nil, err
}
-
- return getClient(env, subscriptionID, tenantID, cred, env.TokenAudience), nil
+ factory, err := azclient.NewClientFactory(&azclient.ClientFactoryConfig{
+ SubscriptionID: subscriptionID,
+ }, armConfig, cred)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{
+ groupsClient: factory.GetResourceGroupClient(),
+ vmClient: factory.GetVirtualMachineClient(),
+ nicClient: factory.GetInterfaceClient(),
+ subnetsClient: factory.GetSubnetClient(),
+ vnetClient: factory.GetVirtualNetworkClient(),
+ disksClient: factory.GetDiskClient(),
+ sshPublicKeysClient: factory.GetSSHPublicKeyResourceClient(),
+ }, nil
}
-
-func (az *Client) GetAzureDisksClient() (compute.DisksClient, error) {
-
+func (az *Client) GetAzureDisksClient() (diskclient.Interface, error) {
return az.disksClient, nil
}
-func (az *Client) EnsureSSHPublicKey(ctx context.Context, _, resourceGroupName, location, keyName string) (publicKey string, err error) {
+func (az *Client) EnsureSSHPublicKey(ctx context.Context, resourceGroupName, location, keyName string) (publicKey string, err error) {
_, err = az.sshPublicKeysClient.Create(ctx, resourceGroupName, keyName, compute.SSHPublicKeyResource{Location: &location})
if err != nil {
return "", err
@@ -83,91 +105,85 @@ func (az *Client) EnsureSSHPublicKey(ctx context.Context, _, resourceGroupName,
return *result.PublicKey, nil
}
-func (az *Client) EnsureResourceGroup(ctx context.Context, name, location string, managedBy *string) (resourceGroup *resources.Group, err error) {
+func (az *Client) EnsureResourceGroup(ctx context.Context, name, location string, managedBy *string) (resourceGroup *resources.ResourceGroup, err error) {
var tags map[string]*string
group, err := az.groupsClient.Get(ctx, name)
- if err == nil && group.Tags != nil {
+ if err == nil && group != nil && group.Tags != nil {
tags = group.Tags
} else {
tags = make(map[string]*string)
}
- if managedBy == nil {
+ if managedBy == nil && group != nil {
managedBy = group.ManagedBy
}
// Tags for correlating resource groups with prow jobs on testgrid
- tags["buildID"] = stringPointer(os.Getenv("BUILD_ID"))
- tags["jobName"] = stringPointer(os.Getenv("JOB_NAME"))
- tags["creationTimestamp"] = stringPointer(time.Now().UTC().Format(time.RFC3339))
+ tags["buildID"] = to.Ptr(os.Getenv("BUILD_ID"))
+ tags["jobName"] = to.Ptr(os.Getenv("JOB_NAME"))
+ tags["creationTimestamp"] = to.Ptr(time.Now().UTC().Format(time.RFC3339))
- response, err := az.groupsClient.CreateOrUpdate(ctx, name, resources.Group{
+ response, err := az.groupsClient.CreateOrUpdate(ctx, name, resources.ResourceGroup{
Name: &name,
Location: &location,
ManagedBy: managedBy,
Tags: tags,
})
if err != nil {
- return &response, err
+ return response, err
}
- return &response, nil
+ return response, nil
}
func (az *Client) DeleteResourceGroup(ctx context.Context, groupName string) error {
_, err := az.groupsClient.Get(ctx, groupName)
if err == nil {
- future, err := az.groupsClient.Delete(ctx, groupName)
+ err = az.groupsClient.Delete(ctx, groupName)
if err != nil {
return fmt.Errorf("cannot delete resource group %v: %v", groupName, err)
}
- err = future.WaitForCompletionRef(ctx, az.groupsClient.Client)
- if err != nil {
- // Skip the teardown errors because of https://github.com/Azure/go-autorest/issues/357
- // TODO(feiskyer): fix the issue by upgrading go-autorest version >= v11.3.2.
- log.Printf("Warning: failed to delete resource group %q with error %v", groupName, err)
- }
}
return nil
}
-func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location, vmName string) (vm compute.VirtualMachine, err error) {
+func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location, vmName string) (compute.VirtualMachine, error) {
nic, err := az.EnsureNIC(ctx, groupName, location, vmName+"-nic", vmName+"-vnet", vmName+"-subnet")
if err != nil {
- return vm, err
+ return compute.VirtualMachine{}, err
}
- publicKey, err := az.EnsureSSHPublicKey(ctx, az.subscriptionID, groupName, location, "test-key")
+ publicKey, err := az.EnsureSSHPublicKey(ctx, groupName, location, "test-key")
if err != nil {
- return vm, err
+ return compute.VirtualMachine{}, err
}
- future, err := az.vmClient.CreateOrUpdate(
+ resp, err := az.vmClient.CreateOrUpdate(
ctx,
groupName,
vmName,
compute.VirtualMachine{
- Location: pointer.String(location),
- VirtualMachineProperties: &compute.VirtualMachineProperties{
+ Location: to.Ptr(location),
+ Properties: &compute.VirtualMachineProperties{
HardwareProfile: &compute.HardwareProfile{
- VMSize: "Standard_DS2_v2",
+ VMSize: to.Ptr(compute.VirtualMachineSizeTypesStandardDS2V2),
},
StorageProfile: &compute.StorageProfile{
ImageReference: &compute.ImageReference{
- Publisher: pointer.String("Canonical"),
- Offer: pointer.String("UbuntuServer"),
- Sku: pointer.String("16.04.0-LTS"),
- Version: pointer.String("latest"),
+ Publisher: to.Ptr("Canonical"),
+ Offer: to.Ptr("UbuntuServer"),
+ SKU: to.Ptr("16.04.0-LTS"),
+ Version: to.Ptr("latest"),
},
},
- OsProfile: &compute.OSProfile{
- ComputerName: pointer.String(vmName),
- AdminUsername: pointer.String("azureuser"),
- AdminPassword: pointer.String("Azureuser1234"),
+ OSProfile: &compute.OSProfile{
+ ComputerName: to.Ptr(vmName),
+ AdminUsername: to.Ptr("azureuser"),
+ AdminPassword: to.Ptr("Azureuser1234"),
LinuxConfiguration: &compute.LinuxConfiguration{
- DisablePasswordAuthentication: pointer.Bool(true),
+ DisablePasswordAuthentication: to.Ptr(true),
SSH: &compute.SSHConfiguration{
- PublicKeys: &[]compute.SSHPublicKey{
+ PublicKeys: []*compute.SSHPublicKey{
{
- Path: pointer.String("/home/azureuser/.ssh/authorized_keys"),
+ Path: to.Ptr("/home/azureuser/.ssh/authorized_keys"),
KeyData: &publicKey,
},
},
@@ -175,11 +191,11 @@ func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location,
},
},
NetworkProfile: &compute.NetworkProfile{
- NetworkInterfaces: &[]compute.NetworkInterfaceReference{
+ NetworkInterfaces: []*compute.NetworkInterfaceReference{
{
ID: nic.ID,
- NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
- Primary: pointer.Bool(true),
+ Properties: &compute.NetworkInterfaceReferenceProperties{
+ Primary: to.Ptr(true),
},
},
},
@@ -188,42 +204,37 @@ func (az *Client) EnsureVirtualMachine(ctx context.Context, groupName, location,
},
)
if err != nil {
- return vm, fmt.Errorf("cannot create vm: %v", err)
+ return compute.VirtualMachine{}, fmt.Errorf("cannot create vm: %v", err)
}
- err = future.WaitForCompletionRef(ctx, az.vmClient.Client)
- if err != nil {
- return vm, fmt.Errorf("cannot get the vm create or update future response: %v", err)
- }
-
- return future.Result(az.vmClient)
+ return *resp, nil
}
-func (az *Client) EnsureNIC(ctx context.Context, groupName, location, nicName, vnetName, subnetName string) (nic network.Interface, err error) {
- _, err = az.EnsureVirtualNetworkAndSubnet(ctx, groupName, location, vnetName, subnetName)
+func (az *Client) EnsureNIC(ctx context.Context, groupName, location, nicName, vnetName, subnetName string) (network.Interface, error) {
+ _, err := az.EnsureVirtualNetworkAndSubnet(ctx, groupName, location, vnetName, subnetName)
if err != nil {
- return nic, err
+ return network.Interface{}, err
}
subnet, err := az.GetVirtualNetworkSubnet(ctx, groupName, vnetName, subnetName)
if err != nil {
- return nic, fmt.Errorf("cannot get subnet %s of virtual network %s in %s: %v", subnetName, vnetName, groupName, err)
+ return network.Interface{}, fmt.Errorf("cannot get subnet %s of virtual network %s in %s: %v", subnetName, vnetName, groupName, err)
}
- future, err := az.nicClient.CreateOrUpdate(
+ nic, err := az.nicClient.CreateOrUpdate(
ctx,
groupName,
nicName,
network.Interface{
- Name: pointer.String(nicName),
- Location: pointer.String(location),
- InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
- IPConfigurations: &[]network.InterfaceIPConfiguration{
+ Name: to.Ptr(nicName),
+ Location: to.Ptr(location),
+ Properties: &network.InterfacePropertiesFormat{
+ IPConfigurations: []*network.InterfaceIPConfiguration{
{
- Name: pointer.String("ipConfig1"),
- InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
+ Name: to.Ptr("ipConfig1"),
+ Properties: &network.InterfaceIPConfigurationPropertiesFormat{
Subnet: &subnet,
- PrivateIPAllocationMethod: network.Dynamic,
+ PrivateIPAllocationMethod: to.Ptr(network.IPAllocationMethodDynamic),
},
},
},
@@ -231,33 +242,28 @@ func (az *Client) EnsureNIC(ctx context.Context, groupName, location, nicName, v
},
)
if err != nil {
- return nic, fmt.Errorf("cannot create nic: %v", err)
- }
-
- err = future.WaitForCompletionRef(ctx, az.nicClient.Client)
- if err != nil {
- return nic, fmt.Errorf("cannot get nic create or update future response: %v", err)
+ return network.Interface{}, fmt.Errorf("cannot create nic: %v", err)
}
- return future.Result(az.nicClient)
+ return *nic, nil
}
-func (az *Client) EnsureVirtualNetworkAndSubnet(ctx context.Context, groupName, location, vnetName, subnetName string) (vnet network.VirtualNetwork, err error) {
- future, err := az.vnetClient.CreateOrUpdate(
+func (az *Client) EnsureVirtualNetworkAndSubnet(ctx context.Context, groupName, location, vnetName, subnetName string) (network.VirtualNetwork, error) {
+ vnet, err := az.vnetClient.CreateOrUpdate(
ctx,
groupName,
vnetName,
network.VirtualNetwork{
- Location: pointer.String(location),
- VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{
+ Location: to.Ptr(location),
+ Properties: &network.VirtualNetworkPropertiesFormat{
AddressSpace: &network.AddressSpace{
- AddressPrefixes: &[]string{"10.0.0.0/8"},
+ AddressPrefixes: []*string{to.Ptr("10.0.0.0/8")},
},
- Subnets: &[]network.Subnet{
+ Subnets: []*network.Subnet{
{
- Name: pointer.String(subnetName),
- SubnetPropertiesFormat: &network.SubnetPropertiesFormat{
- AddressPrefix: pointer.String("10.0.0.0/16"),
+ Name: to.Ptr(subnetName),
+ Properties: &network.SubnetPropertiesFormat{
+ AddressPrefix: to.Ptr("10.0.0.0/16"),
},
},
},
@@ -265,76 +271,13 @@ func (az *Client) EnsureVirtualNetworkAndSubnet(ctx context.Context, groupName,
})
if err != nil {
- return vnet, fmt.Errorf("cannot create virtual network: %v", err)
+ return network.VirtualNetwork{}, fmt.Errorf("cannot create virtual network: %v", err)
}
- err = future.WaitForCompletionRef(ctx, az.vnetClient.Client)
- if err != nil {
- return vnet, fmt.Errorf("cannot get the vnet create or update future response: %v", err)
- }
-
- return future.Result(az.vnetClient)
+ return *vnet, nil
}
func (az *Client) GetVirtualNetworkSubnet(ctx context.Context, groupName, vnetName, subnetName string) (network.Subnet, error) {
- return az.subnetsClient.Get(ctx, groupName, vnetName, subnetName, "")
-}
-
-func getCloudConfig(env azure.Environment) cloud.Configuration {
- switch env.Name {
- case azure.USGovernmentCloud.Name:
- return cloud.AzureGovernment
- case azure.ChinaCloud.Name:
- return cloud.AzureChina
- case azure.PublicCloud.Name:
- return cloud.AzurePublic
- default:
- return cloud.Configuration{
- ActiveDirectoryAuthorityHost: env.ActiveDirectoryEndpoint,
- Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
- cloud.ResourceManager: {
- Audience: env.TokenAudience,
- Endpoint: env.ResourceManagerEndpoint,
- },
- },
- }
- }
-}
-
-func getClient(env azure.Environment, subscriptionID, _ string, cred *azidentity.ClientSecretCredential, scope string) *Client {
- c := &Client{
- environment: env,
- subscriptionID: subscriptionID,
- groupsClient: resources.NewGroupsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
- vmClient: compute.NewVirtualMachinesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
- nicClient: network.NewInterfacesClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
- subnetsClient: network.NewSubnetsClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
- vnetClient: network.NewVirtualNetworksClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
- disksClient: compute.NewDisksClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
- sshPublicKeysClient: compute.NewSSHPublicKeysClientWithBaseURI(env.ResourceManagerEndpoint, subscriptionID),
- }
-
- if !strings.HasSuffix(scope, "/.default") {
- scope += "/.default"
- }
- // Use an adapter so azidentity in the Azure SDK can be used as Authorizer
- // when calling the Azure Management Packages, which we currently use. Once
- // the Azure SDK clients (found in /sdk) move to stable, we can update our
- // clients and they will be able to use the creds directly without the
- // authorizer.
- authorizer := azidext.NewTokenCredentialAdapter(cred, []string{scope})
-
- c.groupsClient.Authorizer = authorizer
- c.vmClient.Authorizer = authorizer
- c.nicClient.Authorizer = authorizer
- c.subnetsClient.Authorizer = authorizer
- c.vnetClient.Authorizer = authorizer
- c.disksClient.Authorizer = authorizer
- c.sshPublicKeysClient.Authorizer = authorizer
-
- return c
-}
-
-func stringPointer(s string) *string {
- return &s
+ subnet, err := az.subnetsClient.Get(ctx, groupName, vnetName, subnetName, nil)
+ return *subnet, err
}
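Reviewer note: the rewritten helper above builds authentication and all service clients through the cloud-provider-azure azclient factory instead of the track1 per-service clients. A rough usage sketch follows; the import path of the test helper package and the environment variable names are assumptions for illustration only, not part of this change.

package main

import (
	"context"
	"fmt"
	"os"

	"sigs.k8s.io/azuredisk-csi-driver/test/utils/azure" // assumed module path of this repo's test helpers
)

// Hypothetical driver for the helper: create the factory-backed client once, then
// use the disk client it hands back, which follows track2 (pointer/Properties) conventions.
func main() {
	ctx := context.Background()
	cli, err := azure.GetAzureClient(
		os.Getenv("AZURE_CLOUD"), // e.g. AzurePublicCloud; env var names are placeholders
		os.Getenv("AZURE_SUBSCRIPTION_ID"),
		os.Getenv("AZURE_CLIENT_ID"),
		os.Getenv("AZURE_TENANT_ID"),
		os.Getenv("AZURE_CLIENT_SECRET"),
	)
	if err != nil {
		panic(err)
	}
	disks, _ := cli.GetAzureDisksClient()
	disk, err := disks.Get(ctx, "my-resource-group", "my-disk")
	if err != nil {
		panic(err)
	}
	if disk.Properties != nil && disk.Properties.DiskState != nil {
		fmt.Println("disk state:", *disk.Properties.DiskState)
	}
}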
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index 80321d29a9..b618676c59 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,5 +1,155 @@
# Release History
+## 1.8.0 (2023-10-05)
+
+### Features Added
+
+* Added `Claims` and `EnableCAE` fields to `policy.TokenRequestOptions`.
+* ARM bearer token policy handles CAE challenges.
+* `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec)
+* Added functions `FetcherForNextLink` and `EncodeQueryParams` along with `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL.
+* Added types `KeyCredential` and `SASCredential` to the `azcore` package.
+ * Includes their respective constructor functions.
+* Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package.
+ * Includes their respective constructor functions and options types.
+
+### Breaking Changes
+> These changes affect only code written against beta versions of `v1.8.0`
+* The beta features for tracing and fakes have been omitted for this release.
+
+### Bugs Fixed
+
+* Fixed an issue that could cause some ARM RPs to not be automatically registered.
+* Block bearer token authentication for non TLS protected endpoints.
+
+### Other Changes
+
+* The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated.
+ * `WithCaptureResponse`
+ * `WithHTTPHeader`
+ * `WithRetryOptions`
+* Updated dependencies.
+
+## 1.7.2 (2023-09-06)
+
+### Bugs Fixed
+
+* Fix default HTTP transport to work in WASM modules.
+
+## 1.7.1 (2023-08-14)
+
+### Bugs Fixed
+
+* Enable TLS renegotiation in the default transport policy.
+
+## 1.7.0 (2023-07-12)
+
+### Features Added
+* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing.
+
+### Breaking Changes
+> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2
+* The beta features for CAE, tracing, and fakes have been omitted for this release.
+
+## 1.7.0-beta.2 (2023-06-06)
+
+### Breaking Changes
+> These changes affect only code written against beta version v1.7.0-beta.1
+* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed.
+ * This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`.
+* Method `AddError()` has been removed from type `tracing.Span`.
+* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`.
+
+## 1.6.1 (2023-06-06)
+
+### Bugs Fixed
+* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry.
+
+### Other Changes
+* This version contains all bug fixes from `v1.7.0-beta.1`
+
+## 1.7.0-beta.1 (2023-05-24)
+
+### Features Added
+* Restored CAE support for ARM clients.
+* Added supporting features to enable distributed tracing.
+ * Added func `runtime.StartSpan()` for use by SDKs to start spans.
+ * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context.
+ * Added field `TracingNamespace` to `runtime.PipelineOptions`.
+ * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types.
+ * Added field `SpanFromContext` to `tracing.TracerOptions`.
+ * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`.
+ * Added supporting pipeline policies to include HTTP spans when creating clients.
+* Added package `fake` to support generated fakes packages in SDKs.
+ * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations.
+ * Added an internal fake poller implementation.
+
+### Bugs Fixed
+* Retry policy always clones the underlying `*http.Request` before invoking the next policy.
+* Added some non-standard error codes to the list of error codes for unregistered resource providers.
+
+## 1.6.0 (2023-05-04)
+
+### Features Added
+* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable.
+* Added `TenantID` field to `policy.TokenRequestOptions`.
+
+## 1.5.0 (2023-04-06)
+
+### Features Added
+* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry.
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.5.0-beta.1
+> These features will return in v1.6.0-beta.1.
+* Removed `TokenRequestOptions.Claims` and `.TenantID`
+* Removed ARM client support for CAE and cross-tenant auth.
+
+### Bugs Fixed
+* Added non-conformant LRO terminal states `Cancelled` and `Completed`.
+
+### Other Changes
+* Updated to latest `internal` module.
+
+## 1.5.0-beta.1 (2023-03-02)
+
+### Features Added
+* This release includes the features added in v1.4.0-beta.1
+
+## 1.4.0 (2023-03-02)
+> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1.
+
+### Features Added
+* Add `Clone()` method for `arm/policy.ClientOptions`.
+
+### Bugs Fixed
+* ARM's RP registration policy will no longer swallow unrecognized errors.
+* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`.
+* Fixed wrong policy copy in `arm/runtime.NewPipeline()`.
+
+## 1.4.0-beta.1 (2023-02-02)
+
+### Features Added
+* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable.
+* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`.
+* ARM bearer token policy handles CAE challenges.
+
+## 1.3.1 (2023-02-02)
+
+### Other Changes
+* Update dependencies to latest versions.
+
+## 1.3.0 (2023-01-06)
+
+### Features Added
+* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy`
+ with custom authorization logic
+* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively.
+
+### Other Changes
+* Updated `internal` module to latest version.
+* `policy/Request.SetBody()` allows replacing a request's body with an empty one
+
## 1.2.0 (2022-11-04)
### Features Added
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/client.go
new file mode 100644
index 0000000000..aa34575f66
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/client.go
@@ -0,0 +1,78 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package arm
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
+)
+
+// ClientOptions contains configuration settings for a client's pipeline.
+type ClientOptions = armpolicy.ClientOptions
+
+// Client is a HTTP client for use with ARM endpoints. It consists of an endpoint, pipeline, and tracing provider.
+type Client struct {
+ ep string
+ pl runtime.Pipeline
+ tr tracing.Tracer
+}
+
+// NewClient creates a new Client instance with the provided values.
+// This client is intended to be used with Azure Resource Manager endpoints.
+// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider.
+// if module and package are the same value, the "module/" prefix can be omitted.
+// - moduleVersion - the version of the containing module; used by the telemetry policy
+// - cred - the TokenCredential used to authenticate the request
+// - options - optional client configurations; pass nil to accept the default values
+func NewClient(clientName, moduleVersion string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
+ mod, client, err := shared.ExtractModuleName(clientName)
+ if err != nil {
+ return nil, err
+ }
+
+ if options == nil {
+ options = &ClientOptions{}
+ }
+
+ if !options.Telemetry.Disabled {
+ if err := shared.ValidateModVer(moduleVersion); err != nil {
+ return nil, err
+ }
+ }
+
+ ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+ if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+ ep = c.Endpoint
+ }
+ pl, err := armruntime.NewPipeline(mod, moduleVersion, cred, runtime.PipelineOptions{}, options)
+ if err != nil {
+ return nil, err
+ }
+
+ tr := options.TracingProvider.NewTracer(client, moduleVersion)
+ return &Client{ep: ep, pl: pl, tr: tr}, nil
+}
+
+// Endpoint returns the service's base URL for this client.
+func (c *Client) Endpoint() string {
+ return c.ep
+}
+
+// Pipeline returns the pipeline for this client.
+func (c *Client) Pipeline() runtime.Pipeline {
+ return c.pl
+}
+
+// Tracer returns the tracer for this client.
+func (c *Client) Tracer() tracing.Tracer {
+ return c.tr
+}
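Reviewer note: for context, arm.Client is the base type the track2 resource-manager clients build on. A minimal construction sketch, assuming a DefaultAzureCredential from the vendored azidentity package; the client and module names passed to NewClient are placeholders.

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Credential resolution follows the default chain (environment, managed identity, CLI, ...).
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// clientName follows the "module/package.Client" convention described in the doc comment above.
	client, err := arm.NewClient("armexample/armexample.Client", "v0.1.0", cred, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("ARM endpoint:", client.Endpoint())
}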
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/doc.go
new file mode 100644
index 0000000000..1bdd16a3d0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/doc.go
@@ -0,0 +1,9 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package arm contains functionality specific to Azure Resource Manager clients.
+package arm
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
new file mode 100644
index 0000000000..187fe82b97
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
@@ -0,0 +1,224 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package resource
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ providersKey = "providers"
+ subscriptionsKey = "subscriptions"
+ resourceGroupsLowerKey = "resourcegroups"
+ locationsKey = "locations"
+ builtInResourceNamespace = "Microsoft.Resources"
+)
+
+// RootResourceID defines the tenant as the root parent of all other ResourceID.
+var RootResourceID = &ResourceID{
+ Parent: nil,
+ ResourceType: TenantResourceType,
+ Name: "",
+}
+
+// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`.
+// Don't create this type directly, use ParseResourceID instead.
+type ResourceID struct {
+ // Parent is the parent ResourceID of this instance.
+ // Can be nil if there is no parent.
+ Parent *ResourceID
+
+ // SubscriptionID is the subscription ID in this resource ID.
+ // The value can be empty if the resource ID does not contain a subscription ID.
+ SubscriptionID string
+
+ // ResourceGroupName is the resource group name in this resource ID.
+ // The value can be empty if the resource ID does not contain a resource group name.
+ ResourceGroupName string
+
+ // Provider represents the provider name in this resource ID.
+ // This is only valid when the resource ID represents a resource provider.
+ // Example: `/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Insights`
+ Provider string
+
+ // Location is the location in this resource ID.
+ // The value can be empty if the resource ID does not contain a location name.
+ Location string
+
+ // ResourceType represents the type of this resource ID.
+ ResourceType ResourceType
+
+ // Name is the resource name of this resource ID.
+ Name string
+
+ isChild bool
+ stringValue string
+}
+
+// ParseResourceID parses a string to an instance of ResourceID
+func ParseResourceID(id string) (*ResourceID, error) {
+ if len(id) == 0 {
+ return nil, fmt.Errorf("invalid resource ID: id cannot be empty")
+ }
+
+ if !strings.HasPrefix(id, "/") {
+ return nil, fmt.Errorf("invalid resource ID: resource id '%s' must start with '/'", id)
+ }
+
+ parts := splitStringAndOmitEmpty(id, "/")
+
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ if !strings.EqualFold(parts[0], subscriptionsKey) && !strings.EqualFold(parts[0], providersKey) {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ return appendNext(RootResourceID, parts, id)
+}
+
+// String returns the string of the ResourceID
+func (id *ResourceID) String() string {
+ if len(id.stringValue) > 0 {
+ return id.stringValue
+ }
+
+ if id.Parent == nil {
+ return ""
+ }
+
+ builder := strings.Builder{}
+ builder.WriteString(id.Parent.String())
+
+ if id.isChild {
+ builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType()))
+ if len(id.Name) > 0 {
+ builder.WriteString(fmt.Sprintf("/%s", id.Name))
+ }
+ } else {
+ builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name))
+ }
+
+ id.stringValue = builder.String()
+
+ return id.stringValue
+}
+
+func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID {
+ id := &ResourceID{}
+ id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true)
+ return id
+}
+
+func newResourceIDWithResourceType(parent *ResourceID, resourceType ResourceType, resourceName string) *ResourceID {
+ id := &ResourceID{}
+ id.init(parent, resourceType, resourceName, true)
+ return id
+}
+
+func newResourceIDWithProvider(parent *ResourceID, providerNamespace, resourceTypeName, resourceName string) *ResourceID {
+ id := &ResourceID{}
+ id.init(parent, NewResourceType(providerNamespace, resourceTypeName), resourceName, false)
+ return id
+}
+
+func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType {
+ if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) {
+ return ResourceGroupResourceType
+ } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() {
+ return SubscriptionResourceType
+ }
+
+ return parent.ResourceType.AppendChild(resourceTypeName)
+}
+
+func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name string, isChild bool) {
+ if parent != nil {
+ id.Provider = parent.Provider
+ id.SubscriptionID = parent.SubscriptionID
+ id.ResourceGroupName = parent.ResourceGroupName
+ id.Location = parent.Location
+ }
+
+ if resourceType.String() == SubscriptionResourceType.String() {
+ id.SubscriptionID = name
+ }
+
+ if resourceType.lastType() == locationsKey {
+ id.Location = name
+ }
+
+ if resourceType.String() == ResourceGroupResourceType.String() {
+ id.ResourceGroupName = name
+ }
+
+ if resourceType.String() == ProviderResourceType.String() {
+ id.Provider = name
+ }
+
+ if parent == nil {
+ id.Parent = RootResourceID
+ } else {
+ id.Parent = parent
+ }
+ id.isChild = isChild
+ id.ResourceType = resourceType
+ id.Name = name
+}
+
+func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) {
+ if len(parts) == 0 {
+ return parent, nil
+ }
+
+ if len(parts) == 1 {
+ // subscriptions and resourceGroups are not valid ids without their names
+ if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ // resourceGroup must contain either child or provider resource type
+ if parent.ResourceType.String() == ResourceGroupResourceType.String() {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ return newResourceID(parent, parts[0], ""), nil
+ }
+
+ if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) {
+ //provider resource can only be on a tenant or a subscription parent
+ if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() {
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+ }
+
+ return appendNext(newResourceIDWithResourceType(parent, ProviderResourceType, parts[1]), parts[2:], id)
+ }
+
+ if len(parts) > 3 && strings.EqualFold(parts[0], providersKey) {
+ return appendNext(newResourceIDWithProvider(parent, parts[1], parts[2], parts[3]), parts[4:], id)
+ }
+
+ if len(parts) > 1 && !strings.EqualFold(parts[0], providersKey) {
+ return appendNext(newResourceID(parent, parts[0], parts[1]), parts[2:], id)
+ }
+
+ return nil, fmt.Errorf("invalid resource ID: %s", id)
+}
+
+func splitStringAndOmitEmpty(v, sep string) []string {
+ r := make([]string, 0)
+ for _, s := range strings.Split(v, sep) {
+ if len(s) == 0 {
+ continue
+ }
+ r = append(r, s)
+ }
+
+ return r
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go
new file mode 100644
index 0000000000..ca03ac9713
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go
@@ -0,0 +1,114 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package resource
+
+import (
+ "fmt"
+ "strings"
+)
+
+// SubscriptionResourceType is the ResourceType of a subscription
+var SubscriptionResourceType = NewResourceType(builtInResourceNamespace, "subscriptions")
+
+// ResourceGroupResourceType is the ResourceType of a resource group
+var ResourceGroupResourceType = NewResourceType(builtInResourceNamespace, "resourceGroups")
+
+// TenantResourceType is the ResourceType of a tenant
+var TenantResourceType = NewResourceType(builtInResourceNamespace, "tenants")
+
+// ProviderResourceType is the ResourceType of a provider
+var ProviderResourceType = NewResourceType(builtInResourceNamespace, "providers")
+
+// ResourceType represents an Azure resource type, e.g. "Microsoft.Network/virtualNetworks/subnets".
+// Don't create this type directly, use ParseResourceType or NewResourceType instead.
+type ResourceType struct {
+ // Namespace is the namespace of the resource type.
+ // e.g. "Microsoft.Network" in resource type "Microsoft.Network/virtualNetworks/subnets"
+ Namespace string
+
+ // Type is the full type name of the resource type.
+ // e.g. "virtualNetworks/subnets" in resource type "Microsoft.Network/virtualNetworks/subnets"
+ Type string
+
+ // Types is the slice of all the sub-types of this resource type.
+ // e.g. ["virtualNetworks", "subnets"] in resource type "Microsoft.Network/virtualNetworks/subnets"
+ Types []string
+
+ stringValue string
+}
+
+// String returns the string of the ResourceType
+func (t ResourceType) String() string {
+ return t.stringValue
+}
+
+// IsParentOf returns true when the receiver is the parent resource type of the child.
+func (t ResourceType) IsParentOf(child ResourceType) bool {
+ if !strings.EqualFold(t.Namespace, child.Namespace) {
+ return false
+ }
+ if len(t.Types) >= len(child.Types) {
+ return false
+ }
+ for i := range t.Types {
+ if !strings.EqualFold(t.Types[i], child.Types[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// AppendChild creates an instance of ResourceType using the receiver as the parent with childType appended to it.
+func (t ResourceType) AppendChild(childType string) ResourceType {
+ return NewResourceType(t.Namespace, fmt.Sprintf("%s/%s", t.Type, childType))
+}
+
+// NewResourceType creates an instance of ResourceType using a provider namespace
+// such as "Microsoft.Network" and type such as "virtualNetworks/subnets".
+func NewResourceType(providerNamespace, typeName string) ResourceType {
+ return ResourceType{
+ Namespace: providerNamespace,
+ Type: typeName,
+ Types: splitStringAndOmitEmpty(typeName, "/"),
+ stringValue: fmt.Sprintf("%s/%s", providerNamespace, typeName),
+ }
+}
+
+// ParseResourceType parses the ResourceType from a resource type string (e.g. Microsoft.Network/virtualNetworks/subnets)
+// or a resource identifier string.
+// e.g. /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet)
+func ParseResourceType(resourceIDOrType string) (ResourceType, error) {
+ // split the path into segments
+ parts := splitStringAndOmitEmpty(resourceIDOrType, "/")
+
+ // There must be at least a namespace and type name
+ if len(parts) < 1 {
+ return ResourceType{}, fmt.Errorf("invalid resource ID or type: %s", resourceIDOrType)
+ }
+
+ // if the type is just subscriptions, it is a built-in type in the Microsoft.Resources namespace
+ if len(parts) == 1 {
+ // Simple resource type
+ return NewResourceType(builtInResourceNamespace, parts[0]), nil
+ } else if strings.Contains(parts[0], ".") {
+ // Handle resource types (Microsoft.Compute/virtualMachines, Microsoft.Network/virtualNetworks/subnets)
+ // it is a full type name
+ return NewResourceType(parts[0], strings.Join(parts[1:], "/")), nil
+ } else {
+ // Check if ResourceID
+ id, err := ParseResourceID(resourceIDOrType)
+ if err != nil {
+ return ResourceType{}, err
+ }
+ return NewResourceType(id.ResourceType.Namespace, id.ResourceType.Type), nil
+ }
+}
+
+func (t ResourceType) lastType() string {
+ return t.Types[len(t.Types)-1]
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
new file mode 100644
index 0000000000..83cf91e3ec
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
@@ -0,0 +1,98 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package policy
+
+import (
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// BearerTokenOptions configures the bearer token policy's behavior.
+type BearerTokenOptions struct {
+ // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests.
+ // The policy will add a token from each of these tenants to every request. The
+ // authenticating user or service principal must be a guest in these tenants, and the
+ // policy's credential must support multitenant authentication.
+ AuxiliaryTenants []string
+
+ // Scopes contains the list of permission scopes required for the token.
+ Scopes []string
+}
+
+// RegistrationOptions configures the registration policy's behavior.
+// All zero-value fields will be initialized with their default values.
+type RegistrationOptions struct {
+ policy.ClientOptions
+
+ // MaxAttempts is the total number of times to attempt automatic registration
+ // in the event that an attempt fails.
+ // The default value is 3.
+ // Set to a value less than zero to disable the policy.
+ MaxAttempts int
+
+ // PollingDelay is the amount of time to sleep between polling intervals.
+ // The default value is 15 seconds.
+ // A value less than zero means no delay between polling intervals (not recommended).
+ PollingDelay time.Duration
+
+ // PollingDuration is the amount of time to wait before abandoning polling.
+// The default value is 5 minutes.
+ // NOTE: Setting this to a small value might cause the policy to prematurely fail.
+ PollingDuration time.Duration
+}
+
+// ClientOptions contains configuration settings for a client's pipeline.
+type ClientOptions struct {
+ policy.ClientOptions
+
+ // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests.
+ // The client will add a token from each of these tenants to every request. The
+ // authenticating user or service principal must be a guest in these tenants, and the
+ // client's credential must support multitenant authentication.
+ AuxiliaryTenants []string
+
+ // DisableRPRegistration disables the auto-RP registration policy. Defaults to false.
+ DisableRPRegistration bool
+}
+
+// Clone returns a deep copy of the current options.
+func (o *ClientOptions) Clone() *ClientOptions {
+ if o == nil {
+ return nil
+ }
+ copiedOptions := *o
+ copiedOptions.Cloud.Services = copyMap(copiedOptions.Cloud.Services)
+ copiedOptions.Logging.AllowedHeaders = copyArray(copiedOptions.Logging.AllowedHeaders)
+ copiedOptions.Logging.AllowedQueryParams = copyArray(copiedOptions.Logging.AllowedQueryParams)
+ copiedOptions.Retry.StatusCodes = copyArray(copiedOptions.Retry.StatusCodes)
+ copiedOptions.PerRetryPolicies = copyArray(copiedOptions.PerRetryPolicies)
+ copiedOptions.PerCallPolicies = copyArray(copiedOptions.PerCallPolicies)
+ return &copiedOptions
+}
+
+// copyMap returns a new map with all the key/value pairs in the src map
+func copyMap[K comparable, V any](src map[K]V) map[K]V {
+ if src == nil {
+ return nil
+ }
+ copiedMap := make(map[K]V)
+ for k, v := range src {
+ copiedMap[k] = v
+ }
+ return copiedMap
+}
+
+// copyArray returns a new array with all the elements in the src array
+func copyArray[T any](src []T) []T {
+ if src == nil {
+ return nil
+ }
+ copiedArray := make([]T, len(src))
+ copy(copiedArray, src)
+ return copiedArray
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_identifier.go
new file mode 100644
index 0000000000..d35d6374fd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_identifier.go
@@ -0,0 +1,23 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package arm
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource"
+)
+
+// RootResourceID defines the tenant as the root parent of all other ResourceID.
+var RootResourceID = resource.RootResourceID
+
+// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`.
+// Don't create this type directly, use ParseResourceID instead.
+type ResourceID = resource.ResourceID
+
+// ParseResourceID parses a string to an instance of ResourceID
+func ParseResourceID(id string) (*ResourceID, error) {
+ return resource.ParseResourceID(id)
+}
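Reviewer note: these exported aliases are what callers use to take apart ARM IDs such as the disk URIs the e2e suites pass around. A small sketch of ParseResourceID with a made-up disk ID:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
)

func main() {
	// Parse a (made-up) managed disk ID and read back its components.
	id, err := arm.ParseResourceID("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Compute/disks/myDisk")
	if err != nil {
		panic(err)
	}
	fmt.Println(id.SubscriptionID)    // 00000000-0000-0000-0000-000000000000
	fmt.Println(id.ResourceGroupName) // myRg
	fmt.Println(id.ResourceType)      // Microsoft.Compute/disks
	fmt.Println(id.Name)              // myDisk
}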
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_type.go
new file mode 100644
index 0000000000..fc7fbffd26
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/resource_type.go
@@ -0,0 +1,40 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package arm
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource"
+)
+
+// SubscriptionResourceType is the ResourceType of a subscription
+var SubscriptionResourceType = resource.SubscriptionResourceType
+
+// ResourceGroupResourceType is the ResourceType of a resource group
+var ResourceGroupResourceType = resource.ResourceGroupResourceType
+
+// TenantResourceType is the ResourceType of a tenant
+var TenantResourceType = resource.TenantResourceType
+
+// ProviderResourceType is the ResourceType of a provider
+var ProviderResourceType = resource.ProviderResourceType
+
+// ResourceType represents an Azure resource type, e.g. "Microsoft.Network/virtualNetworks/subnets".
+// Don't create this type directly, use ParseResourceType or NewResourceType instead.
+type ResourceType = resource.ResourceType
+
+// NewResourceType creates an instance of ResourceType using a provider namespace
+// such as "Microsoft.Network" and type such as "virtualNetworks/subnets".
+func NewResourceType(providerNamespace, typeName string) ResourceType {
+ return resource.NewResourceType(providerNamespace, typeName)
+}
+
+// ParseResourceType parses the ResourceType from a resource type string (e.g. Microsoft.Network/virtualNetworks/subnets)
+// or a resource identifier string.
+// e.g. /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet)
+func ParseResourceType(resourceIDOrType string) (ResourceType, error) {
+ return resource.ParseResourceType(resourceIDOrType)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
new file mode 100644
index 0000000000..266c74b17b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
@@ -0,0 +1,64 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+ azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+// NewPipeline creates a pipeline from connection options. Policies from ClientOptions are
+// placed after policies from PipelineOptions. The telemetry policy, when enabled, will
+// use the specified module and version info.
+func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azruntime.PipelineOptions, options *armpolicy.ClientOptions) (azruntime.Pipeline, error) {
+ if options == nil {
+ options = &armpolicy.ClientOptions{}
+ }
+ conf, err := getConfiguration(&options.ClientOptions)
+ if err != nil {
+ return azruntime.Pipeline{}, err
+ }
+ authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{
+ AuxiliaryTenants: options.AuxiliaryTenants,
+ Scopes: []string{conf.Audience + "/.default"},
+ })
+ perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1)
+ copy(perRetry, plOpts.PerRetry)
+ plOpts.PerRetry = append(perRetry, authPolicy)
+ if !options.DisableRPRegistration {
+ regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions}
+ regPolicy, err := NewRPRegistrationPolicy(cred, &regRPOpts)
+ if err != nil {
+ return azruntime.Pipeline{}, err
+ }
+ perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1)
+ copy(perCall, plOpts.PerCall)
+ plOpts.PerCall = append(perCall, regPolicy)
+ }
+ if plOpts.APIVersion.Name == "" {
+ plOpts.APIVersion.Name = "api-version"
+ }
+ return azruntime.NewPipeline(module, version, plOpts, &options.ClientOptions), nil
+}
+
+func getConfiguration(o *azpolicy.ClientOptions) (cloud.ServiceConfiguration, error) {
+ c := cloud.AzurePublic
+ if !reflect.ValueOf(o.Cloud).IsZero() {
+ c = o.Cloud
+ }
+ if conf, ok := c.Services[cloud.ResourceManager]; ok && conf.Endpoint != "" && conf.Audience != "" {
+ return conf, nil
+ } else {
+ return conf, errors.New("provided Cloud field is missing Azure Resource Manager configuration")
+ }
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
new file mode 100644
index 0000000000..54b3bb78d8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
@@ -0,0 +1,145 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
+)
+
+const headerAuxiliaryAuthorization = "x-ms-authorization-auxiliary"
+
+// acquiringResourceState holds data for an auxiliary token request
+type acquiringResourceState struct {
+ ctx context.Context
+ p *BearerTokenPolicy
+ tenant string
+}
+
+// acquireAuxToken acquires a token from an auxiliary tenant. Only one thread/goroutine at a time ever calls this function.
+func acquireAuxToken(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) {
+ tk, err := state.p.cred.GetToken(state.ctx, azpolicy.TokenRequestOptions{
+ EnableCAE: true,
+ Scopes: state.p.scopes,
+ TenantID: state.tenant,
+ })
+ if err != nil {
+ return azcore.AccessToken{}, time.Time{}, err
+ }
+ return tk, tk.ExpiresOn, nil
+}
+
+// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
+type BearerTokenPolicy struct {
+ auxResources map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState]
+ btp *azruntime.BearerTokenPolicy
+ cred azcore.TokenCredential
+ scopes []string
+}
+
+// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
+// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
+// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
+func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTokenOptions) *BearerTokenPolicy {
+ if opts == nil {
+ opts = &armpolicy.BearerTokenOptions{}
+ }
+ p := &BearerTokenPolicy{cred: cred}
+ p.auxResources = make(map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState], len(opts.AuxiliaryTenants))
+ for _, t := range opts.AuxiliaryTenants {
+ p.auxResources[t] = temporal.NewResource(acquireAuxToken)
+ }
+ p.scopes = make([]string, len(opts.Scopes))
+ copy(p.scopes, opts.Scopes)
+ p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{
+ AuthorizationHandler: azpolicy.AuthorizationHandler{
+ OnChallenge: p.onChallenge,
+ OnRequest: p.onRequest,
+ },
+ })
+ return p
+}
+
+func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error {
+ challenge := res.Header.Get(shared.HeaderWWWAuthenticate)
+ claims, err := parseChallenge(challenge)
+ if err != nil {
+ // the challenge contains claims we can't parse
+ return err
+ } else if claims != "" {
+ // request a new token having the specified claims, send the request again
+ return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes})
+ }
+ // auth challenge didn't include claims, so this is a simple authorization failure
+ return azruntime.NewResponseError(res)
+}
+
+// onRequest authorizes requests with one or more bearer tokens
+func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error {
+ // authorize the request with a token for the primary tenant
+ err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes})
+ if err != nil || len(b.auxResources) == 0 {
+ return err
+ }
+ // add tokens for auxiliary tenants
+ as := acquiringResourceState{
+ ctx: req.Raw().Context(),
+ p: b,
+ }
+ auxTokens := make([]string, 0, len(b.auxResources))
+ for tenant, er := range b.auxResources {
+ as.tenant = tenant
+ auxTk, err := er.Get(as)
+ if err != nil {
+ return err
+ }
+ auxTokens = append(auxTokens, fmt.Sprintf("%s%s", shared.BearerTokenPrefix, auxTk.Token))
+ }
+ req.Raw().Header.Set(headerAuxiliaryAuthorization, strings.Join(auxTokens, ", "))
+ return nil
+}
+
+// Do authorizes a request with a bearer token
+func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
+ return b.btp.Do(req)
+}
+
+// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token
+// that will satisfy conditional access policies. It returns a non-nil error when the given value contains
+// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error.
+func parseChallenge(wwwAuthenticate string) (string, error) {
+ claims := ""
+ var err error
+ for _, param := range strings.Split(wwwAuthenticate, ",") {
+ if _, after, found := strings.Cut(param, "claims="); found {
+ if claims != "" {
+ // The header contains multiple challenges, at least two of which specify claims. The specs allow this
+ // but it's unclear what a client should do in this case and there's as yet no concrete example of it.
+ err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate)
+ break
+ }
+ // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded
+ claims = strings.Trim(after, `\"=`)
+ // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42"
+ if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil {
+ claims = string(b)
+ } else {
+ err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate)
+ break
+ }
+ }
+ }
+ return claims, err
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
new file mode 100644
index 0000000000..7380f02398
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
@@ -0,0 +1,346 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+const (
+ // LogRPRegistration entries contain information specific to the automatic registration of an RP.
+ // Entries of this classification are written IFF the policy needs to take any action.
+ LogRPRegistration log.Event = "RPRegistration"
+)
+
+// setDefaults sets any default values
+func setDefaults(r *armpolicy.RegistrationOptions) {
+ if r.MaxAttempts == 0 {
+ r.MaxAttempts = 3
+ } else if r.MaxAttempts < 0 {
+ r.MaxAttempts = 0
+ }
+ if r.PollingDelay == 0 {
+ r.PollingDelay = 15 * time.Second
+ } else if r.PollingDelay < 0 {
+ r.PollingDelay = 0
+ }
+ if r.PollingDuration == 0 {
+ r.PollingDuration = 5 * time.Minute
+ }
+}
+
+// NewRPRegistrationPolicy creates a policy object configured using the specified options.
+// The policy controls whether an unregistered resource provider should automatically be
+// registered. See https://aka.ms/rps-not-found for more information.
+func NewRPRegistrationPolicy(cred azcore.TokenCredential, o *armpolicy.RegistrationOptions) (azpolicy.Policy, error) {
+ if o == nil {
+ o = &armpolicy.RegistrationOptions{}
+ }
+ conf, err := getConfiguration(&o.ClientOptions)
+ if err != nil {
+ return nil, err
+ }
+ authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{Scopes: []string{conf.Audience + "/.default"}})
+ p := &rpRegistrationPolicy{
+ endpoint: conf.Endpoint,
+ pipeline: runtime.NewPipeline(shared.Module, shared.Version, runtime.PipelineOptions{PerRetry: []azpolicy.Policy{authPolicy}}, &o.ClientOptions),
+ options: *o,
+ }
+ // init the copy
+ setDefaults(&p.options)
+ return p, nil
+}
+
+type rpRegistrationPolicy struct {
+ endpoint string
+ pipeline runtime.Pipeline
+ options armpolicy.RegistrationOptions
+}
+
+func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
+ if r.options.MaxAttempts == 0 {
+ // policy is disabled
+ return req.Next()
+ }
+ const registeredState = "Registered"
+ var rp string
+ var resp *http.Response
+ for attempts := 0; attempts < r.options.MaxAttempts; attempts++ {
+ var err error
+ // make the original request
+ resp, err = req.Next()
+ // getting a 409 is the first indication that the RP might need to be registered, check error response
+ if err != nil || resp.StatusCode != http.StatusConflict {
+ return resp, err
+ }
+ var reqErr requestError
+ if err = runtime.UnmarshalAsJSON(resp, &reqErr); err != nil {
+ return resp, err
+ }
+ if reqErr.ServiceError == nil {
+ // missing service error info. just return the response
+ // to the caller so its error unmarshalling will kick in
+ return resp, err
+ }
+ if !isUnregisteredRPCode(reqErr.ServiceError.Code) {
+ // not a 409 due to unregistered RP. just return the response
+ // to the caller so its error unmarshalling will kick in
+ return resp, err
+ }
+ // RP needs to be registered. start by getting the subscription ID from the original request
+ subID, err := getSubscription(req.Raw().URL.Path)
+ if err != nil {
+ return resp, err
+ }
+ // now get the RP from the error
+ rp, err = getProvider(reqErr)
+ if err != nil {
+ return resp, err
+ }
+ logRegistrationExit := func(v interface{}) {
+ log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v)
+ }
+ log.Writef(LogRPRegistration, "BEGIN registration for %s", rp)
+ // create client and make the registration request
+ // we use the scheme and host from the original request
+ rpOps := &providersOperations{
+ p: r.pipeline,
+ u: r.endpoint,
+ subID: subID,
+ }
+ if _, err = rpOps.Register(req.Raw().Context(), rp); err != nil {
+ logRegistrationExit(err)
+ return resp, err
+ }
+ // RP was registered, however we need to wait for the registration to complete
+ pollCtx, pollCancel := context.WithTimeout(req.Raw().Context(), r.options.PollingDuration)
+ var lastRegState string
+ for {
+ // get the current registration state
+ getResp, err := rpOps.Get(pollCtx, rp)
+ if err != nil {
+ pollCancel()
+ logRegistrationExit(err)
+ return resp, err
+ }
+ if getResp.Provider.RegistrationState != nil && !strings.EqualFold(*getResp.Provider.RegistrationState, lastRegState) {
+ // registration state has changed, or was updated for the first time
+ lastRegState = *getResp.Provider.RegistrationState
+ log.Writef(LogRPRegistration, "registration state is %s", lastRegState)
+ }
+ if strings.EqualFold(lastRegState, registeredState) {
+ // registration complete
+ pollCancel()
+ logRegistrationExit(lastRegState)
+ break
+ }
+ // wait before trying again
+ select {
+ case <-time.After(r.options.PollingDelay):
+ // continue polling
+ case <-pollCtx.Done():
+ pollCancel()
+ logRegistrationExit(pollCtx.Err())
+ return resp, pollCtx.Err()
+ }
+ }
+ // RP was successfully registered, retry the original request
+ err = req.RewindBody()
+ if err != nil {
+ return resp, err
+ }
+ }
+ // if we get here it means we exceeded the number of attempts
+ return resp, fmt.Errorf("exceeded attempts to register %s", rp)
+}
+
+var unregisteredRPCodes = []string{
+ "MissingSubscriptionRegistration",
+ "MissingRegistrationForResourceProvider",
+ "Subscription Not Registered",
+ "SubscriptionNotRegistered",
+}
+
+func isUnregisteredRPCode(errorCode string) bool {
+ for _, code := range unregisteredRPCodes {
+ if strings.EqualFold(errorCode, code) {
+ return true
+ }
+ }
+ return false
+}
+
+func getSubscription(path string) (string, error) {
+ parts := strings.Split(path, "/")
+ for i, v := range parts {
+ if v == "subscriptions" && (i+1) < len(parts) {
+ return parts[i+1], nil
+ }
+ }
+ return "", fmt.Errorf("failed to obtain subscription ID from %s", path)
+}
+
+func getProvider(re requestError) (string, error) {
+ if len(re.ServiceError.Details) > 0 {
+ return re.ServiceError.Details[0].Target, nil
+ }
+ return "", errors.New("unexpected empty Details")
+}
+
+// minimal error definitions to simplify detection
+type requestError struct {
+ ServiceError *serviceError `json:"error"`
+}
+
+type serviceError struct {
+ Code string `json:"code"`
+ Details []serviceErrorDetails `json:"details"`
+}
+
+type serviceErrorDetails struct {
+ Code string `json:"code"`
+ Target string `json:"target"`
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+// the following code was copied from module armresources, providers.go and models.go
+// only the minimum amount of code was copied to get this working and some edits were made.
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+type providersOperations struct {
+ p runtime.Pipeline
+ u string
+ subID string
+}
+
+// Get - Gets the specified resource provider.
+func (client *providersOperations) Get(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) {
+ req, err := client.getCreateRequest(ctx, resourceProviderNamespace)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ resp, err := client.p.Do(req)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ result, err := client.getHandleResponse(resp)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ return result, nil
+}
+
+// getCreateRequest creates the Get request.
+func (client *providersOperations) getCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"
+ urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace))
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID))
+ req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.u, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ query := req.Raw().URL.Query()
+ query.Set("api-version", "2019-05-01")
+ req.Raw().URL.RawQuery = query.Encode()
+ return req, nil
+}
+
+// getHandleResponse handles the Get response.
+func (client *providersOperations) getHandleResponse(resp *http.Response) (providerResponse, error) {
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return providerResponse{}, exported.NewResponseError(resp)
+ }
+ result := providerResponse{RawResponse: resp}
+ err := runtime.UnmarshalAsJSON(resp, &result.Provider)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ return result, err
+}
+
+// Register - Registers a subscription with a resource provider.
+func (client *providersOperations) Register(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) {
+ req, err := client.registerCreateRequest(ctx, resourceProviderNamespace)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ resp, err := client.p.Do(req)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ result, err := client.registerHandleResponse(resp)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ return result, nil
+}
+
+// registerCreateRequest creates the Register request.
+func (client *providersOperations) registerCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"
+ urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace))
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID))
+ req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.u, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ query := req.Raw().URL.Query()
+ query.Set("api-version", "2019-05-01")
+ req.Raw().URL.RawQuery = query.Encode()
+ return req, nil
+}
+
+// registerHandleResponse handles the Register response.
+func (client *providersOperations) registerHandleResponse(resp *http.Response) (providerResponse, error) {
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return providerResponse{}, exported.NewResponseError(resp)
+ }
+ result := providerResponse{RawResponse: resp}
+ err := runtime.UnmarshalAsJSON(resp, &result.Provider)
+ if err != nil {
+ return providerResponse{}, err
+ }
+ return result, err
+}
+
+// ProviderResponse is the response envelope for operations that return a Provider type.
+type providerResponse struct {
+ // Resource provider information.
+ Provider *provider
+
+ // RawResponse contains the underlying HTTP response.
+ RawResponse *http.Response
+}
+
+// Provider - Resource provider information.
+type provider struct {
+ // The provider ID.
+ ID *string `json:"id,omitempty"`
+
+ // The namespace of the resource provider.
+ Namespace *string `json:"namespace,omitempty"`
+
+ // The registration policy of the resource provider.
+ RegistrationPolicy *string `json:"registrationPolicy,omitempty"`
+
+ // The registration state of the resource provider.
+ RegistrationState *string `json:"registrationState,omitempty"`
+}
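
A rough sketch of wiring the policy into a pipeline; the module name, version, and credential setup are illustrative, and the option values simply restate the defaults applied by setDefaults above.

```go
package main

import (
	"log"
	"time"

	armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
	armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
	azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	rp, err := armruntime.NewRPRegistrationPolicy(cred, &armpolicy.RegistrationOptions{
		MaxAttempts:     3, // these values match the defaults set above
		PollingDelay:    15 * time.Second,
		PollingDuration: 5 * time.Minute,
	})
	if err != nil {
		log.Fatal(err)
	}
	// A 409 carrying an "unregistered RP" error code now triggers registration and a retry.
	pl := runtime.NewPipeline("example", "v0.1.0", runtime.PipelineOptions{
		PerCall: []azpolicy.Policy{rp},
	}, &azpolicy.ClientOptions{})
	_ = pl
}
```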
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go
new file mode 100644
index 0000000000..1400d43799
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go
@@ -0,0 +1,24 @@
+//go:build go1.16
+// +build go1.16
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+
+func init() {
+ cloud.AzureChina.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
+ Audience: "https://management.core.chinacloudapi.cn",
+ Endpoint: "https://management.chinacloudapi.cn",
+ }
+ cloud.AzureGovernment.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
+ Audience: "https://management.core.usgovcloudapi.net",
+ Endpoint: "https://management.usgovcloudapi.net",
+ }
+ cloud.AzurePublic.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
+ Audience: "https://management.core.windows.net/",
+ Endpoint: "https://management.azure.com",
+ }
+}
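
A short sketch of how this registration is consumed when targeting a sovereign cloud; the blank import stands in for the ARM client packages that normally import this package.

```go
package main

import (
	"fmt"

	_ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" // runs the init above

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
)

func main() {
	// Selecting cloud.AzureGovernment points ARM clients at the endpoint/audience
	// pair registered by the init function above.
	opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment}
	fmt.Println(opts.Cloud.Services[cloud.ResourceManager].Endpoint) // https://management.usgovcloudapi.net
}
```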
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
index f9fb23422d..9f051ba4ae 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
@@ -7,24 +7,37 @@
package azcore
import (
- "context"
"reflect"
- "time"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)
// AccessToken represents an Azure service bearer access token with expiry information.
-type AccessToken struct {
- Token string
- ExpiresOn time.Time
-}
+type AccessToken = exported.AccessToken
// TokenCredential represents a credential capable of providing an OAuth token.
-type TokenCredential interface {
- // GetToken requests an access token for the specified set of scopes.
- GetToken(ctx context.Context, options policy.TokenRequestOptions) (AccessToken, error)
+type TokenCredential = exported.TokenCredential
+
+// KeyCredential contains an authentication key used to authenticate to an Azure service.
+type KeyCredential = exported.KeyCredential
+
+// NewKeyCredential creates a new instance of [KeyCredential] with the specified values.
+// - key is the authentication key
+func NewKeyCredential(key string) *KeyCredential {
+ return exported.NewKeyCredential(key)
+}
+
+// SASCredential contains a shared access signature used to authenticate to an Azure service.
+type SASCredential = exported.SASCredential
+
+// NewSASCredential creates a new instance of [SASCredential] with the specified values.
+// - sas is the shared access signature
+func NewSASCredential(sas string) *SASCredential {
+ return exported.NewSASCredential(sas)
}
// holds sentinel values used to send nulls
@@ -73,3 +86,65 @@ func IsNullValue[T any](v T) bool {
// ClientOptions contains configuration settings for a client's pipeline.
type ClientOptions = policy.ClientOptions
+
+// Client is a basic HTTP client. It consists of a pipeline and tracing provider.
+type Client struct {
+ pl runtime.Pipeline
+ tr tracing.Tracer
+
+ // cached on the client to support shallow copying with new values
+ tp tracing.Provider
+ modVer string
+}
+
+// NewClient creates a new Client instance with the provided values.
+// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider.
+// if module and package are the same value, the "module/" prefix can be omitted.
+// - moduleVersion - the semantic version of the containing module; used by the telemetry policy
+// - plOpts - pipeline configuration options; can be the zero-value
+// - options - optional client configurations; pass nil to accept the default values
+func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) {
+ mod, client, err := shared.ExtractModuleName(clientName)
+ if err != nil {
+ return nil, err
+ }
+
+ if options == nil {
+ options = &ClientOptions{}
+ }
+
+ if !options.Telemetry.Disabled {
+ if err := shared.ValidateModVer(moduleVersion); err != nil {
+ return nil, err
+ }
+ }
+
+ pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options)
+
+ tr := options.TracingProvider.NewTracer(client, moduleVersion)
+
+ return &Client{
+ pl: pl,
+ tr: tr,
+ tp: options.TracingProvider,
+ modVer: moduleVersion,
+ }, nil
+}
+
+// Pipeline returns the pipeline for this client.
+func (c *Client) Pipeline() runtime.Pipeline {
+ return c.pl
+}
+
+// Tracer returns the tracer for this client.
+func (c *Client) Tracer() tracing.Tracer {
+ return c.tr
+}
+
+// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName.
+// Note that the values for module name and version will be preserved from the source Client.
+// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans
+func (c *Client) WithClientName(clientName string) *Client {
+ tr := c.tp.NewTracer(clientName, c.modVer)
+ return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer}
+}
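
A minimal sketch of constructing a Client and sending a request through its pipeline; the client name "azfoo/azfoo.Client" and the URL are placeholders.

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	// clientName follows the "module/package.Client" form validated by ExtractModuleName.
	client, err := azcore.NewClient("azfoo/azfoo.Client", "v0.1.0", runtime.PipelineOptions{}, nil)
	if err != nil {
		log.Fatal(err)
	}
	req, err := runtime.NewRequest(context.Background(), http.MethodGet, "https://example.com")
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Pipeline().Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```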
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
index 6e029d493c..e793b31db2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
@@ -7,10 +7,11 @@
package exported
import (
+ "context"
"io"
"net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "sync/atomic"
+ "time"
)
type nopCloser struct {
@@ -41,20 +42,100 @@ func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
return false
}
-// Payload reads and returns the response body or an error.
-// On a successful read, the response body is cached.
-// Subsequent reads will access the cached value.
-// Exported as runtime.Payload().
-func Payload(resp *http.Response) ([]byte, error) {
- // r.Body won't be a nopClosingBytesReader if downloading was skipped
- if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
- return buf.Bytes(), nil
- }
- bytesBody, err := io.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- return nil, err
- }
- resp.Body = shared.NewNopClosingBytesReader(bytesBody)
- return bytesBody, nil
+// AccessToken represents an Azure service bearer access token with expiry information.
+// Exported as azcore.AccessToken.
+type AccessToken struct {
+ Token string
+ ExpiresOn time.Time
+}
+
+// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token.
+// Exported as policy.TokenRequestOptions.
+type TokenRequestOptions struct {
+ // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a
+ // service may return in a claims challenge following an authorization failure. If a service returned the
+ // claims value base64 encoded, it must be decoded before setting this field.
+ Claims string
+
+ // EnableCAE indicates whether to enable Continuous Access Evaluation (CAE) for the requested token. When true,
+ // azidentity credentials request CAE tokens for resource APIs supporting CAE. Clients are responsible for
+ // handling CAE challenges. If a client that doesn't handle CAE challenges receives a CAE token, it may end up
+ // in a loop retrying an API call with a token that has been revoked due to CAE.
+ EnableCAE bool
+
+ // Scopes contains the list of permission scopes required for the token.
+ Scopes []string
+
+ // TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in
+ // their configured default tenants when this field isn't set.
+ TenantID string
+}
+
+// TokenCredential represents a credential capable of providing an OAuth token.
+// Exported as azcore.TokenCredential.
+type TokenCredential interface {
+ // GetToken requests an access token for the specified set of scopes.
+ GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error)
+}
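
Because TokenCredential is a single-method interface, test code (such as the e2e suites this patch touches) can substitute a stub; a hypothetical example:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// fakeCredential is a stand-in TokenCredential; the token value is made up.
type fakeCredential struct{}

func (fakeCredential) GetToken(ctx context.Context, options policy.TokenRequestOptions) (azcore.AccessToken, error) {
	return azcore.AccessToken{Token: "fake-token", ExpiresOn: time.Now().Add(time.Hour)}, nil
}

func main() {
	var cred azcore.TokenCredential = fakeCredential{}
	tk, _ := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	fmt.Println(tk.Token)
}
```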
+
+// KeyCredential contains an authentication key used to authenticate to an Azure service.
+// Exported as azcore.KeyCredential.
+type KeyCredential struct {
+ cred *keyCredential
+}
+
+// NewKeyCredential creates a new instance of [KeyCredential] with the specified values.
+// - key is the authentication key
+func NewKeyCredential(key string) *KeyCredential {
+ return &KeyCredential{cred: newKeyCredential(key)}
+}
+
+// Update replaces the existing key with the specified value.
+func (k *KeyCredential) Update(key string) {
+ k.cred.Update(key)
+}
+
+// SASCredential contains a shared access signature used to authenticate to an Azure service.
+// Exported as azcore.SASCredential.
+type SASCredential struct {
+ cred *keyCredential
+}
+
+// NewSASCredential creates a new instance of [SASCredential] with the specified values.
+// - sas is the shared access signature
+func NewSASCredential(sas string) *SASCredential {
+ return &SASCredential{cred: newKeyCredential(sas)}
+}
+
+// Update replaces the existing shared access signature with the specified value.
+func (k *SASCredential) Update(sas string) {
+ k.cred.Update(sas)
+}
+
+// KeyCredentialGet returns the key for cred.
+func KeyCredentialGet(cred *KeyCredential) string {
+ return cred.cred.Get()
+}
+
+// SASCredentialGet returns the shared access signature for cred.
+func SASCredentialGet(cred *SASCredential) string {
+ return cred.cred.Get()
+}
+
+type keyCredential struct {
+ key atomic.Value // string
+}
+
+func newKeyCredential(key string) *keyCredential {
+ keyCred := keyCredential{}
+ keyCred.key.Store(key)
+ return &keyCred
+}
+
+func (k *keyCredential) Get() string {
+ return k.key.Load().(string)
+}
+
+func (k *keyCredential) Update(key string) {
+ k.key.Store(key)
}
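
A brief sketch of the intended usage through the azcore wrappers: the key is stored atomically, so Update can rotate it while requests are in flight; the key values here are placeholders.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

func main() {
	cred := azcore.NewKeyCredential("initial-key")
	// Clients holding cred see the new value on their next request because
	// the underlying keyCredential reads the key atomically.
	cred.Update("rotated-key")
	fmt.Println("credential rotated")
}
```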
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
index 4aeec15893..fa99d1b7ed 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
@@ -100,32 +100,47 @@ func (req *Request) OperationValue(value interface{}) bool {
return req.values.get(value)
}
-// SetBody sets the specified ReadSeekCloser as the HTTP request body.
+// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length
+// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "",
+// Content-Type won't be set.
+// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser.
func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
- // Set the body and content length.
- size, err := body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
- if err != nil {
- return err
+ var err error
+ var size int64
+ if body != nil {
+ size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
+ if err != nil {
+ return err
+ }
}
if size == 0 {
- body.Close()
- return nil
- }
- _, err = body.Seek(0, io.SeekStart)
- if err != nil {
- return err
+ // treat an empty stream the same as a nil one: assign req a nil body
+ body = nil
+ // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content
+ // (Del is a no-op when the header has no value)
+ req.req.Header.Del(shared.HeaderContentLength)
+ } else {
+ _, err = body.Seek(0, io.SeekStart)
+ if err != nil {
+ return err
+ }
+ req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
+ req.Raw().GetBody = func() (io.ReadCloser, error) {
+ _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
+ return body, err
+ }
}
- req.Raw().GetBody = func() (io.ReadCloser, error) {
- _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
- return body, err
- }
- // keep a copy of the original body. this is to handle cases
+ // keep a copy of the body argument. this is to handle cases
// where req.Body is replaced, e.g. httputil.DumpRequest and friends.
req.body = body
req.req.Body = body
req.req.ContentLength = size
- req.req.Header.Set(shared.HeaderContentType, contentType)
- req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
+ if contentType == "" {
+ // Del is a no-op when the header has no value
+ req.req.Header.Del(shared.HeaderContentType)
+ } else {
+ req.req.Header.Set(shared.HeaderContentType, contentType)
+ }
return nil
}
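
A hedged sketch of calling SetBody from client code via the public policy.Request type; the URL and JSON payload are invented.

```go
package main

import (
	"bytes"
	"context"
	"log"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

func main() {
	req, err := runtime.NewRequest(context.Background(), http.MethodPut, "https://example.com/item")
	if err != nil {
		log.Fatal(err)
	}
	// streaming.NopCloser turns the io.ReadSeeker into the io.ReadSeekCloser SetBody expects.
	body := streaming.NopCloser(bytes.NewReader([]byte(`{"name":"demo"}`)))
	if err := req.SetBody(body, "application/json"); err != nil {
		log.Fatal(err)
	}
	log.Println(req.Raw().ContentLength) // 15
}
```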
@@ -154,3 +169,14 @@ func (req *Request) Clone(ctx context.Context) *Request {
r2.req = req.req.Clone(ctx)
return &r2
}
+
+// not exported but dependent on Request
+
+// PolicyFunc is a type that implements the Policy interface.
+// Use this type when implementing a stateless policy as a first-class function.
+type PolicyFunc func(*Request) (*http.Response, error)
+
+// Do implements the Policy interface on PolicyFunc.
+func (pf PolicyFunc) Do(req *Request) (*http.Response, error) {
+ return pf(req)
+}
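
PolicyFunc itself lives in an internal package; client code gets the same effect by implementing the public policy.Policy interface. A minimal sketch with a made-up header:

```go
package main

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// headerPolicy adds a static header to every request.
type headerPolicy struct{}

func (headerPolicy) Do(req *policy.Request) (*http.Response, error) {
	req.Raw().Header.Set("X-Example", "demo") // illustrative header only
	return req.Next()
}

func main() {
	pl := runtime.NewPipeline("example", "v0.1.0", runtime.PipelineOptions{
		PerCall: []policy.Policy{headerPolicy{}},
	}, &policy.ClientOptions{})
	_ = pl
}
```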
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
index 3db6acc832..7df2f88c1c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
@@ -12,6 +12,8 @@ import (
"fmt"
"net/http"
"regexp"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
)
// NewResponseError creates a new *ResponseError from the provided HTTP response.
@@ -29,7 +31,7 @@ func NewResponseError(resp *http.Response) error {
}
// if we didn't get x-ms-error-code, check in the response body
- body, err := Payload(resp)
+ body, err := exported.Payload(resp, nil)
if err != nil {
return err
}
@@ -121,7 +123,7 @@ func (e *ResponseError) Error() string {
fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE")
}
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
- body, err := Payload(e.RawResponse)
+ body, err := exported.Payload(e.RawResponse, nil)
if err != nil {
// this really shouldn't fail at this point as the response
// body is already cached (it was read in NewResponseError)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
index d34f161c7b..b05bd8b38d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
@@ -16,6 +16,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md
@@ -68,15 +69,15 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
if asyncURL == "" {
return nil, errors.New("response is missing Azure-AsyncOperation header")
}
- if !pollers.IsValidURL(asyncURL) {
+ if !poller.IsValidURL(asyncURL) {
return nil, fmt.Errorf("invalid polling URL %s", asyncURL)
}
// check for provisioning state. if the operation is a RELO
// and terminates synchronously this will prevent extra polling.
// it's ok if there's no provisioning state.
- state, _ := pollers.GetProvisioningState(resp)
+ state, _ := poller.GetProvisioningState(resp)
if state == "" {
- state = pollers.StatusInProgress
+ state = poller.StatusInProgress
}
p := &Poller[T]{
pl: pl,
@@ -93,17 +94,17 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
// Done returns true if the LRO is in a terminal state.
func (p *Poller[T]) Done() bool {
- return pollers.IsTerminalState(p.CurState)
+ return poller.IsTerminalState(p.CurState)
}
// Poll retrieves the current state of the LRO.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
- if !pollers.StatusCodeValid(resp) {
+ if !poller.StatusCodeValid(resp) {
p.resp = resp
return "", exported.NewResponseError(resp)
}
- state, err := pollers.GetStatus(resp)
+ state, err := poller.GetStatus(resp)
if err != nil {
return "", err
} else if state == "" {
@@ -122,7 +123,7 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
if p.resp.StatusCode == http.StatusNoContent {
return nil
- } else if pollers.Failed(p.CurState) {
+ } else if poller.Failed(p.CurState) {
return exported.NewResponseError(p.resp)
}
var req *exported.Request
@@ -154,5 +155,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
p.resp = resp
}
- return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
index 7efdd8a0df..2bb9e105b6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
@@ -14,6 +14,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// Kind is the identifier of this type in a resume token.
@@ -72,9 +73,9 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
}
// default initial state to InProgress. depending on the HTTP
// status code and provisioning state, we might change the value.
- curState := pollers.StatusInProgress
- provState, err := pollers.GetProvisioningState(resp)
- if err != nil && !errors.Is(err, pollers.ErrNoBody) {
+ curState := poller.StatusInProgress
+ provState, err := poller.GetProvisioningState(resp)
+ if err != nil && !errors.Is(err, poller.ErrNoBody) {
return nil, err
}
if resp.StatusCode == http.StatusCreated && provState != "" {
@@ -85,37 +86,37 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
curState = provState
} else if provState == "" {
// for a 200, absence of provisioning state indicates success
- curState = pollers.StatusSucceeded
+ curState = poller.StatusSucceeded
}
} else if resp.StatusCode == http.StatusNoContent {
- curState = pollers.StatusSucceeded
+ curState = poller.StatusSucceeded
}
p.CurState = curState
return p, nil
}
func (p *Poller[T]) Done() bool {
- return pollers.IsTerminalState(p.CurState)
+ return poller.IsTerminalState(p.CurState)
}
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
- if !pollers.StatusCodeValid(resp) {
+ if !poller.StatusCodeValid(resp) {
p.resp = resp
return "", exported.NewResponseError(resp)
}
if resp.StatusCode == http.StatusNoContent {
p.resp = resp
- p.CurState = pollers.StatusSucceeded
+ p.CurState = poller.StatusSucceeded
return p.CurState, nil
}
- state, err := pollers.GetProvisioningState(resp)
- if errors.Is(err, pollers.ErrNoBody) {
+ state, err := poller.GetProvisioningState(resp)
+ if errors.Is(err, poller.ErrNoBody) {
// a missing response body in non-204 case is an error
return "", err
} else if state == "" {
// a response body without provisioning state is considered terminal success
- state = pollers.StatusSucceeded
+ state = poller.StatusSucceeded
} else if err != nil {
return "", err
}
@@ -130,5 +131,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
}
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
index 276685da44..d6be89876a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
@@ -16,6 +16,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// Kind is the identifier of this type in a resume token.
@@ -61,15 +62,15 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
if locURL == "" {
return nil, errors.New("response is missing Location header")
}
- if !pollers.IsValidURL(locURL) {
+ if !poller.IsValidURL(locURL) {
return nil, fmt.Errorf("invalid polling URL %s", locURL)
}
// check for provisioning state. if the operation is a RELO
// and terminates synchronously this will prevent extra polling.
// it's ok if there's no provisioning state.
- state, _ := pollers.GetProvisioningState(resp)
+ state, _ := poller.GetProvisioningState(resp)
if state == "" {
- state = pollers.StatusInProgress
+ state = poller.StatusInProgress
}
return &Poller[T]{
pl: pl,
@@ -81,7 +82,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
}
func (p *Poller[T]) Done() bool {
- return pollers.IsTerminalState(p.CurState)
+ return poller.IsTerminalState(p.CurState)
}
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
@@ -93,17 +94,17 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
// if provisioning state is available, use that. this is only
// for some ARM LRO scenarios (e.g. DELETE with a Location header)
// so if it's missing then use HTTP status code.
- provState, _ := pollers.GetProvisioningState(resp)
+ provState, _ := poller.GetProvisioningState(resp)
p.resp = resp
if provState != "" {
p.CurState = provState
} else if resp.StatusCode == http.StatusAccepted {
- p.CurState = pollers.StatusInProgress
+ p.CurState = poller.StatusInProgress
} else if resp.StatusCode > 199 && resp.StatusCode < 300 {
// any 2xx other than a 202 indicates success
- p.CurState = pollers.StatusSucceeded
+ p.CurState = poller.StatusSucceeded
} else {
- p.CurState = pollers.StatusFailed
+ p.CurState = poller.StatusFailed
}
return p.CurState, nil
})
@@ -114,5 +115,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
}
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
index c3c648266a..1bc7ad0ace 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
@@ -16,6 +16,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// Applicable returns true if the LRO is using Operation-Location.
@@ -54,19 +55,19 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
if opURL == "" {
return nil, errors.New("response is missing Operation-Location header")
}
- if !pollers.IsValidURL(opURL) {
+ if !poller.IsValidURL(opURL) {
return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL)
}
locURL := resp.Header.Get(shared.HeaderLocation)
// Location header is optional
- if locURL != "" && !pollers.IsValidURL(locURL) {
+ if locURL != "" && !poller.IsValidURL(locURL) {
return nil, fmt.Errorf("invalid Location URL %s", locURL)
}
// default initial state to InProgress. if the
// service sent us a status then use that instead.
- curState := pollers.StatusInProgress
- status, err := pollers.GetStatus(resp)
- if err != nil && !errors.Is(err, pollers.ErrNoBody) {
+ curState := poller.StatusInProgress
+ status, err := poller.GetStatus(resp)
+ if err != nil && !errors.Is(err, poller.ErrNoBody) {
return nil, err
}
if status != "" {
@@ -86,16 +87,16 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
}
func (p *Poller[T]) Done() bool {
- return pollers.IsTerminalState(p.CurState)
+ return poller.IsTerminalState(p.CurState)
}
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) {
- if !pollers.StatusCodeValid(resp) {
+ if !poller.StatusCodeValid(resp) {
p.resp = resp
return "", exported.NewResponseError(resp)
}
- state, err := pollers.GetStatus(resp)
+ state, err := poller.GetStatus(resp)
if err != nil {
return "", err
} else if state == "" {
@@ -118,7 +119,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
} else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost {
// no final GET required, terminal response should have it
- } else if rl, rlErr := pollers.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, pollers.ErrNoBody) {
+ } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) {
return rlErr
} else if rl != "" {
req, err = exported.NewRequest(ctx, http.MethodGet, rl)
@@ -140,5 +141,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
p.resp = resp
}
- return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
index 17ab7dadc3..d8d86a46c2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
@@ -12,49 +12,15 @@ import (
"errors"
"fmt"
"net/http"
- "net/url"
"reflect"
- "strings"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
-// the well-known set of LRO status/provisioning state values.
-const (
- StatusSucceeded = "Succeeded"
- StatusCanceled = "Canceled"
- StatusFailed = "Failed"
- StatusInProgress = "InProgress"
-)
-
-// IsTerminalState returns true if the LRO's state is terminal.
-func IsTerminalState(s string) bool {
- return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
-}
-
-// Failed returns true if the LRO's state is terminal failure.
-func Failed(s string) bool {
- return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
-}
-
-// Succeeded returns true if the LRO's state is terminal success.
-func Succeeded(s string) bool {
- return strings.EqualFold(s, StatusSucceeded)
-}
-
-// returns true if the LRO response contains a valid HTTP status code
-func StatusCodeValid(resp *http.Response) bool {
- return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
-}
-
-// IsValidURL verifies that the URL is valid and absolute.
-func IsValidURL(s string) bool {
- u, err := url.Parse(s)
- return err == nil && u.IsAbs()
-}
-
// getTokenTypeName creates a type name from the type parameter T.
func getTokenTypeName[T any]() (string, error) {
tt := shared.TypeOfT[T]()
@@ -130,102 +96,6 @@ func IsTokenValid[T any](token string) error {
return nil
}
-// ErrNoBody is returned if the response didn't contain a body.
-var ErrNoBody = errors.New("the response did not contain a body")
-
-// GetJSON reads the response body into a raw JSON object.
-// It returns ErrNoBody if there was no content.
-func GetJSON(resp *http.Response) (map[string]interface{}, error) {
- body, err := exported.Payload(resp)
- if err != nil {
- return nil, err
- }
- if len(body) == 0 {
- return nil, ErrNoBody
- }
- // unmarshall the body to get the value
- var jsonBody map[string]interface{}
- if err = json.Unmarshal(body, &jsonBody); err != nil {
- return nil, err
- }
- return jsonBody, nil
-}
-
-// provisioningState returns the provisioning state from the response or the empty string.
-func provisioningState(jsonBody map[string]interface{}) string {
- jsonProps, ok := jsonBody["properties"]
- if !ok {
- return ""
- }
- props, ok := jsonProps.(map[string]interface{})
- if !ok {
- return ""
- }
- rawPs, ok := props["provisioningState"]
- if !ok {
- return ""
- }
- ps, ok := rawPs.(string)
- if !ok {
- return ""
- }
- return ps
-}
-
-// status returns the status from the response or the empty string.
-func status(jsonBody map[string]interface{}) string {
- rawStatus, ok := jsonBody["status"]
- if !ok {
- return ""
- }
- status, ok := rawStatus.(string)
- if !ok {
- return ""
- }
- return status
-}
-
-// GetStatus returns the LRO's status from the response body.
-// Typically used for Azure-AsyncOperation flows.
-// If there is no status in the response body the empty string is returned.
-func GetStatus(resp *http.Response) (string, error) {
- jsonBody, err := GetJSON(resp)
- if err != nil {
- return "", err
- }
- return status(jsonBody), nil
-}
-
-// GetProvisioningState returns the LRO's state from the response body.
-// If there is no state in the response body the empty string is returned.
-func GetProvisioningState(resp *http.Response) (string, error) {
- jsonBody, err := GetJSON(resp)
- if err != nil {
- return "", err
- }
- return provisioningState(jsonBody), nil
-}
-
-// GetResourceLocation returns the LRO's resourceLocation value from the response body.
-// Typically used for Operation-Location flows.
-// If there is no resourceLocation in the response body the empty string is returned.
-func GetResourceLocation(resp *http.Response) (string, error) {
- jsonBody, err := GetJSON(resp)
- if err != nil {
- return "", err
- }
- v, ok := jsonBody["resourceLocation"]
- if !ok {
- // it might be ok if the field doesn't exist, the caller must make that determination
- return "", nil
- }
- vv, ok := v.(string)
- if !ok {
- return "", fmt.Errorf("the resourceLocation value %v was not in string format", v)
- }
- return vv, nil
-}
-
// used if the operation synchronously completed
type NopPoller[T any] struct {
resp *http.Response
@@ -239,7 +109,7 @@ func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) {
if resp.StatusCode == http.StatusNoContent {
return np, nil
}
- payload, err := exported.Payload(resp)
+ payload, err := exported.Payload(resp, nil)
if err != nil {
return nil, err
}
@@ -269,8 +139,8 @@ func (p *NopPoller[T]) Result(ctx context.Context, out *T) error {
// If the request fails, the update func is not called.
// The update func returns the state of the operation for logging purposes or an error
// if it fails to extract the required state from the response.
-func PollHelper(ctx context.Context, endpoint string, pl exported.Pipeline, update func(resp *http.Response) (string, error)) error {
- req, err := exported.NewRequest(ctx, http.MethodGet, endpoint)
+func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error {
+ req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint)
if err != nil {
return err
}
@@ -296,13 +166,13 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
}
defer resp.Body.Close()
- if !StatusCodeValid(resp) || failed {
+ if !poller.StatusCodeValid(resp) || failed {
// the LRO failed. unmarshal the error and update state
- return exported.NewResponseError(resp)
+ return azexported.NewResponseError(resp)
}
// success case
- payload, err := exported.Payload(resp)
+ payload, err := exported.Payload(resp, nil)
if err != nil {
return err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index 75d241c5b4..05e53aa76e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -21,6 +21,8 @@ const (
HeaderOperationLocation = "Operation-Location"
HeaderRetryAfter = "Retry-After"
HeaderUserAgent = "User-Agent"
+ HeaderWWWAuthenticate = "WWW-Authenticate"
+ HeaderXMSClientRequestID = "x-ms-client-request-id"
)
const BearerTokenPrefix = "Bearer "
@@ -30,5 +32,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
- Version = "v1.2.0"
+ Version = "v1.8.0"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
index 96eef2956f..1bf3aca918 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
@@ -8,12 +8,14 @@ package shared
import (
"context"
- "errors"
- "io"
+ "fmt"
"net/http"
"reflect"
+ "regexp"
"strconv"
"time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)
// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
@@ -22,8 +24,8 @@ type CtxWithHTTPHeaderKey struct{}
// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
type CtxWithRetryOptionsKey struct{}
-// CtxIncludeResponseKey is used as a context key for retrieving the raw response.
-type CtxIncludeResponseKey struct{}
+// CtxWithCaptureResponse is used as a context key for retrieving the raw response.
+type CtxWithCaptureResponse struct{}
// Delay waits for the duration to elapse or the context to be cancelled.
func Delay(ctx context.Context, delay time.Duration) error {
@@ -61,75 +63,66 @@ func TypeOfT[T any]() reflect.Type {
return reflect.TypeOf((*T)(nil)).Elem()
}
-// BytesSetter abstracts replacing a byte slice on some type.
-type BytesSetter interface {
- Set(b []byte)
-}
+// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface.
+type TransportFunc func(*http.Request) (*http.Response, error)
-// NewNopClosingBytesReader creates a new *NopClosingBytesReader for the specified slice.
-func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader {
- return &NopClosingBytesReader{s: data}
+// Do implements the Transporter interface for the TransportFunc type.
+func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) {
+ return pf(req)
}
-// NopClosingBytesReader is an io.ReadSeekCloser around a byte slice.
-// It also provides direct access to the byte slice to avoid rereading.
-type NopClosingBytesReader struct {
- s []byte
- i int64
+// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string.
+func ValidateModVer(moduleVersion string) error {
+ modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`)
+ if !modVerRegx.MatchString(moduleVersion) {
+ return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion)
+ }
+ return nil
}
-// Bytes returns the underlying byte slice.
-func (r *NopClosingBytesReader) Bytes() []byte {
- return r.s
-}
+// ExtractModuleName returns "module", "package.Client" from "module/package.Client" or
+// "package", "package.Client" from "package.Client" when there's no "module/" prefix.
+// If clientName is malformed, an error is returned.
+func ExtractModuleName(clientName string) (string, string, error) {
+ // uses unnamed capturing for "module", "package.Client", and "package"
+ regex, err := regexp.Compile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`)
+ if err != nil {
+ return "", "", err
+ }
-// Close implements the io.Closer interface.
-func (*NopClosingBytesReader) Close() error {
- return nil
-}
+ matches := regex.FindStringSubmatch(clientName)
+ if len(matches) < 4 {
+ return "", "", fmt.Errorf("malformed clientName %s", clientName)
+ }
-// Read implements the io.Reader interface.
-func (r *NopClosingBytesReader) Read(b []byte) (n int, err error) {
- if r.i >= int64(len(r.s)) {
- return 0, io.EOF
+ // the first match is the entire string, the second is "module", the third is
+ // "package.Client" and the fourth is "package".
+ // if there was no "module/" prefix, the second match will be the empty string
+ if matches[1] != "" {
+ return matches[1], matches[2], nil
}
- n = copy(b, r.s[r.i:])
- r.i += int64(n)
- return
+ return matches[3], matches[2], nil
}
-// Set replaces the existing byte slice with the specified byte slice and resets the reader.
-func (r *NopClosingBytesReader) Set(b []byte) {
- r.s = b
- r.i = 0
+// NonRetriableError marks the specified error as non-retriable.
+func NonRetriableError(err error) error {
+ return &nonRetriableError{err}
}
-// Seek implements the io.Seeker interface.
-func (r *NopClosingBytesReader) Seek(offset int64, whence int) (int64, error) {
- var i int64
- switch whence {
- case io.SeekStart:
- i = offset
- case io.SeekCurrent:
- i = r.i + offset
- case io.SeekEnd:
- i = int64(len(r.s)) + offset
- default:
- return 0, errors.New("nopClosingBytesReader: invalid whence")
- }
- if i < 0 {
- return 0, errors.New("nopClosingBytesReader: negative position")
- }
- r.i = i
- return i, nil
+type nonRetriableError struct {
+ error
}
-var _ BytesSetter = (*NopClosingBytesReader)(nil)
+func (p *nonRetriableError) Error() string {
+ return p.error.Error()
+}
-// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface.
-type TransportFunc func(*http.Request) (*http.Response, error)
+func (*nonRetriableError) NonRetriable() {
+ // marker method
+}
-// Do implements the Transporter interface for the TransportFunc type.
-func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) {
- return pf(req)
+func (p *nonRetriableError) Unwrap() error {
+ return p.error
}
+
+var _ errorinfo.NonRetriable = (*nonRetriableError)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
index 27c3022988..f73704cf01 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
@@ -7,10 +7,13 @@
package policy
import (
+ "context"
+ "net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)
@@ -98,7 +101,7 @@ type RetryOptions struct {
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation.
// Typically the value is greater than or equal to the value specified in RetryDelay.
- // The default Value is 120 seconds. A value less than zero means there is no cap.
+ // The default value is 60 seconds. A value less than zero means there is no cap.
MaxRetryDelay time.Duration
// StatusCodes specifies the HTTP status codes that indicate the operation should be retried.
@@ -112,6 +115,15 @@ type RetryOptions struct {
// Specifying values will replace the default values.
// Specifying an empty slice will disable retries for HTTP status codes.
StatusCodes []int
+
+ // ShouldRetry evaluates if the retry policy should retry the request.
+ // When specified, the function overrides comparison against the list of
+ // HTTP status codes and error checking within the retry policy. Context
+ // and NonRetriable errors remain evaluated before calling ShouldRetry.
+ // The *http.Response and error parameters are mutually exclusive, i.e.
+ // if one is nil, the other is not nil.
+ // A return value of true means the retry policy should retry.
+ ShouldRetry func(*http.Response, error) bool
}
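
A brief sketch of a custom ShouldRetry hook; retrying only on 429 and transport errors is an illustrative choice, not guidance from this patch.

```go
package main

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func main() {
	opts := policy.ClientOptions{
		Retry: policy.RetryOptions{
			MaxRetries: 3,
			// Exactly one of resp and err is non-nil when ShouldRetry is called.
			ShouldRetry: func(resp *http.Response, err error) bool {
				if err != nil {
					return true // transport error, retry
				}
				return resp.StatusCode == http.StatusTooManyRequests
			},
		},
	}
	_ = opts
}
```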
// TelemetryOptions configures the telemetry policy's behavior.
@@ -125,12 +137,49 @@ type TelemetryOptions struct {
}
// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token.
-type TokenRequestOptions struct {
- // Scopes contains the list of permission scopes required for the token.
- Scopes []string
-}
+type TokenRequestOptions = exported.TokenRequestOptions
// BearerTokenOptions configures the bearer token policy's behavior.
type BearerTokenOptions struct {
- // placeholder for future options
+ // AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request.
+ // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from
+ // its given credential.
+ AuthorizationHandler AuthorizationHandler
+}
+
+// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
+type AuthorizationHandler struct {
+ // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token
+ // from the policy's given credential. Implementations that need to perform I/O should use the Request's context,
+ // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't
+ // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a
+ // token from its credential according to its configuration.
+ OnRequest func(*Request, func(TokenRequestOptions) error) error
+
+ // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the
+ // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible
+ // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's
+ // given credential. Implementations that need to perform I/O should use the Request's context, available from
+ // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil,
+ // the policy will return any 401 response to the client.
+ OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error
+}
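
A sketch of passing an OnChallenge handler to the public bearer token policy; parseClaims is a hypothetical helper standing in for challenge parsing along the lines of parseChallenge earlier in this patch, and the runtime.NewBearerTokenPolicy call reflects the public constructor rather than anything added here.

```go
package main

import (
	"errors"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// parseClaims is a placeholder; a real implementation would decode the claims
// parameter from the WWW-Authenticate header.
func parseClaims(string) string { return "" }

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	handler := policy.AuthorizationHandler{
		// Re-authorize with whatever claims the service demanded, then retry.
		OnChallenge: func(req *policy.Request, resp *http.Response, authorize func(policy.TokenRequestOptions) error) error {
			claims := parseClaims(resp.Header.Get("WWW-Authenticate"))
			if claims == "" {
				return errors.New("challenge contained no claims")
			}
			return authorize(policy.TokenRequestOptions{
				Scopes: []string{"https://management.azure.com/.default"},
				Claims: claims,
			})
		},
	}
	btp := runtime.NewBearerTokenPolicy(cred, []string{"https://management.azure.com/.default"}, &policy.BearerTokenOptions{
		AuthorizationHandler: handler,
	})
	_ = btp
}
```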
+
+// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
+// The resp parameter will contain the HTTP response after the request has completed.
+func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
+ return context.WithValue(parent, shared.CtxWithCaptureResponse{}, resp)
+}
+
+// WithHTTPHeader adds the specified http.Header to the parent context.
+// Use this to specify custom HTTP headers at the API-call level.
+// Any overlapping headers will have their values replaced with the values specified here.
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
+ return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header)
+}
+
+// WithRetryOptions adds the specified RetryOptions to the parent context.
+// Use this to specify custom RetryOptions at the API-call level.
+func WithRetryOptions(parent context.Context, options RetryOptions) context.Context {
+ return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options)
}
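
A small usage sketch of these per-call context helpers; the header name is invented.

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func main() {
	var raw *http.Response
	ctx := policy.WithCaptureResponse(context.Background(), &raw)
	ctx = policy.WithHTTPHeader(ctx, http.Header{"X-Example-Correlation": []string{"demo"}})
	ctx = policy.WithRetryOptions(ctx, policy.RetryOptions{MaxRetries: 1})
	// Pass ctx to any client method; after the call returns, raw holds the final
	// HTTP response captured by the includeResponsePolicy at the head of the pipeline.
	_ = ctx
}
```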
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
index 5507665d65..8a2e6c61ed 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
@@ -10,6 +10,9 @@ import (
"context"
"encoding/json"
"errors"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// PagingHandler contains the required data for constructing a Pager.
@@ -75,3 +78,41 @@ func (p *Pager[T]) NextPage(ctx context.Context) (T, error) {
func (p *Pager[T]) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, &p.current)
}
+
+// FetcherForNextLinkOptions contains the optional values for [FetcherForNextLink].
+type FetcherForNextLinkOptions struct {
+ // NextReq is the func to be called when requesting subsequent pages.
+ // Used for paged operations that have a custom next link operation.
+ NextReq func(context.Context, string) (*policy.Request, error)
+}
+
+// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL.
+// - ctx is the [context.Context] controlling the lifetime of the HTTP operation
+// - pl is the [Pipeline] used to dispatch the HTTP request
+// - nextLink is the URL used to fetch the next page. the empty string indicates the first page is to be requested
+// - firstReq is the func to be called when creating the request for the first page
+// - options contains any optional parameters, pass nil to accept the default values
+func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) {
+ var req *policy.Request
+ var err error
+ if nextLink == "" {
+ req, err = firstReq(ctx)
+ } else if nextLink, err = EncodeQueryParams(nextLink); err == nil {
+ if options != nil && options.NextReq != nil {
+ req, err = options.NextReq(ctx, nextLink)
+ } else {
+ req, err = NewRequest(ctx, http.MethodGet, nextLink)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ resp, err := pl.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if !HasStatusCode(resp, http.StatusOK) {
+ return nil, NewResponseError(resp)
+ }
+ return resp, nil
+}
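A sketch of how FetcherForNextLink typically plugs into a PagingHandler; the widgetsPage model, endpoint, and JSON shape are assumptions for illustration, not part of this patch.

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widgetsPage is a hypothetical paged response; NextLink drives paging.
type widgetsPage struct {
	Value    []string `json:"value"`
	NextLink *string  `json:"nextLink"`
}

func newWidgetsPager(pl runtime.Pipeline, endpoint string) *runtime.Pager[widgetsPage] {
	return runtime.NewPager(runtime.PagingHandler[widgetsPage]{
		More: func(p widgetsPage) bool { return p.NextLink != nil && *p.NextLink != "" },
		Fetcher: func(ctx context.Context, cur *widgetsPage) (widgetsPage, error) {
			nextLink := ""
			if cur != nil && cur.NextLink != nil {
				nextLink = *cur.NextLink
			}
			// FetcherForNextLink issues the first request via firstReq and GETs
			// nextLink on subsequent pages.
			resp, err := runtime.FetcherForNextLink(ctx, pl, nextLink, func(ctx context.Context) (*policy.Request, error) {
				return runtime.NewRequest(ctx, http.MethodGet, endpoint)
			}, nil)
			if err != nil {
				return widgetsPage{}, err
			}
			var page widgetsPage
			err = runtime.UnmarshalAsJSON(resp, &page)
			return page, err
		},
	})
}
```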
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go
index a2906f51bc..9d9288f53d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go
@@ -7,8 +7,6 @@
package runtime
import (
- "net/http"
-
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
@@ -46,7 +44,7 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy
}
// we put the includeResponsePolicy at the very beginning so that the raw response
// is populated with the final response (some policies might mutate the response)
- policies := []policy.Policy{policyFunc(includeResponsePolicy)}
+ policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)}
if cp.APIVersion != "" {
policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion))
}
@@ -59,19 +57,10 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy
policies = append(policies, plOpts.PerRetry...)
policies = append(policies, cp.PerRetryPolicies...)
policies = append(policies, NewLogPolicy(&cp.Logging))
- policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy))
+ policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy))
transport := cp.Transport
if transport == nil {
transport = defaultHTTPClient
}
return exported.NewPipeline(transport, policies...)
}
-
-// policyFunc is a type that implements the Policy interface.
-// Use this type when implementing a stateless policy as a first-class function.
-type policyFunc func(*policy.Request) (*http.Response, error)
-
-// Do implements the Policy interface on policyFunc.
-func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
- return pf(req)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
index 71e3062be0..ff4931cd24 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
@@ -4,35 +4,40 @@
package runtime
import (
+ "errors"
"net/http"
+ "strings"
"time"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
"github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
)
// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
type BearerTokenPolicy struct {
// mainResource is the resource to be retrieved using the tenant specified in the credential
- mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState]
+ mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState]
// the following fields are read-only
- cred azcore.TokenCredential
- scopes []string
+ authzHandler policy.AuthorizationHandler
+ cred exported.TokenCredential
+ scopes []string
}
type acquiringResourceState struct {
req *policy.Request
p *BearerTokenPolicy
+ tro policy.TokenRequestOptions
}
// acquire acquires or updates the resource; only one
// thread/goroutine at a time ever calls this function
-func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) {
- tk, err := state.p.cred.GetToken(state.req.Raw().Context(), policy.TokenRequestOptions{Scopes: state.p.scopes})
+func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) {
+ tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro)
if err != nil {
- return azcore.AccessToken{}, time.Time{}, err
+ return exported.AccessToken{}, time.Time{}, err
}
return tk, tk.ExpiresOn, nil
}
@@ -41,24 +46,75 @@ func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newE
// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
// scopes: the list of permission scopes required for the token.
// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
-func NewBearerTokenPolicy(cred azcore.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy {
+func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy {
+ if opts == nil {
+ opts = &policy.BearerTokenOptions{}
+ }
return &BearerTokenPolicy{
+ authzHandler: opts.AuthorizationHandler,
cred: cred,
scopes: scopes,
mainResource: temporal.NewResource(acquire),
}
}
+// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential
+func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error {
+ return func(tro policy.TokenRequestOptions) error {
+ as := acquiringResourceState{p: b, req: req, tro: tro}
+ tk, err := b.mainResource.Get(as)
+ if err != nil {
+ return err
+ }
+ req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token)
+ return nil
+ }
+}
+
// Do authorizes a request with a bearer token
func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
- as := acquiringResourceState{
- p: b,
- req: req,
+ if strings.ToLower(req.Raw().URL.Scheme) != "https" {
+ return nil, shared.NonRetriableError(errors.New("bearer token authentication is not permitted for non TLS protected (https) endpoints"))
+ }
+ var err error
+ if b.authzHandler.OnRequest != nil {
+ err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
+ } else {
+ err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes})
}
- tk, err := b.mainResource.Get(as)
+ if err != nil {
+ return nil, ensureNonRetriable(err)
+ }
+
+ res, err := req.Next()
if err != nil {
return nil, err
}
- req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token)
- return req.Next()
+
+ if res.StatusCode == http.StatusUnauthorized {
+ b.mainResource.Expire()
+ if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil {
+ if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
+ res, err = req.Next()
+ }
+ }
+ }
+ return res, ensureNonRetriable(err)
+}
+
+func ensureNonRetriable(err error) error {
+ var nre errorinfo.NonRetriable
+ if err != nil && !errors.As(err, &nre) {
+ err = btpError{err}
+ }
+ return err
}
+
+// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize
+type btpError struct {
+ error
+}
+
+func (btpError) NonRetriable() {}
+
+var _ errorinfo.NonRetriable = (*btpError)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go
index 02d621ee89..99dc029f0c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go
@@ -11,7 +11,6 @@ import (
"net/http"
"strings"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)
@@ -29,7 +28,7 @@ func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) {
}
// Either bodyDownloadPolicyOpValues was not specified (so skip is false)
// or it was specified and skip is false: don't skip downloading the body
- _, err = exported.Payload(resp)
+ _, err = Payload(resp)
if err != nil {
return resp, newBodyDownloadError(err, req)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go
index 770e0a2b6a..c230af0afa 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go
@@ -34,6 +34,7 @@ func httpHeaderPolicy(req *policy.Request) (*http.Response, error) {
// WithHTTPHeader adds the specified http.Header to the parent context.
// Use this to specify custom HTTP headers at the API-call level.
// Any overlapping headers will have their values replaced with the values specified here.
+// Deprecated: use [policy.WithHTTPHeader] instead.
func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
- return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header)
+ return policy.WithHTTPHeader(parent, header)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
index 4714baa30c..bb00f6c2fd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
@@ -20,7 +20,7 @@ func includeResponsePolicy(req *policy.Request) (*http.Response, error) {
if resp == nil {
return resp, err
}
- if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil {
+ if httpOutRaw := req.Raw().Context().Value(shared.CtxWithCaptureResponse{}); httpOutRaw != nil {
httpOut := httpOutRaw.(**http.Response)
*httpOut = resp
}
@@ -29,6 +29,7 @@ func includeResponsePolicy(req *policy.Request) (*http.Response, error) {
// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
// The resp parameter will contain the HTTP response after the request has completed.
+// Deprecated: use [policy.WithCaptureResponse] instead.
func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
- return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp)
+ return policy.WithCaptureResponse(parent, resp)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
new file mode 100644
index 0000000000..2e47a5bad0
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
@@ -0,0 +1,49 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential].
+type KeyCredentialPolicy struct {
+ cred *exported.KeyCredential
+ header string
+ prefix string
+}
+
+// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy].
+type KeyCredentialPolicyOptions struct {
+ // Prefix is used if the key requires a prefix before it's inserted into the HTTP request.
+ Prefix string
+}
+
+// NewKeyCredentialPolicy creates a new instance of [KeyCredentialPolicy].
+// - cred is the [azcore.KeyCredential] used to authenticate with the service
+// - header is the name of the HTTP request header in which the key is placed
+// - options contains optional configuration, pass nil to accept the default values
+func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options *KeyCredentialPolicyOptions) *KeyCredentialPolicy {
+ if options == nil {
+ options = &KeyCredentialPolicyOptions{}
+ }
+ return &KeyCredentialPolicy{
+ cred: cred,
+ header: header,
+ prefix: options.Prefix,
+ }
+}
+
+// Do implements the Do method on the [policy.Policy] interface.
+func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
+ val := exported.KeyCredentialGet(k.cred)
+ if k.prefix != "" {
+ val = k.prefix + val
+ }
+ req.Raw().Header.Add(k.header, val)
+ return req.Next()
+}
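A brief sketch of constructing this policy; the module name, version, and header name are illustrative assumptions (the real header is service-specific). azcore.NewKeyCredential returns the *KeyCredential this constructor expects.

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newKeyAuthPipeline builds a pipeline that sends the key in a custom header on every try.
func newKeyAuthPipeline(key string) runtime.Pipeline {
	cred := azcore.NewKeyCredential(key)
	keyPolicy := runtime.NewKeyCredentialPolicy(cred, "x-example-api-key", nil)
	return runtime.NewPipeline("example-module", "v0.1.0", runtime.PipelineOptions{
		PerRetry: []policy.Policy{keyPolicy},
	}, nil)
}
```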
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
index 30a02a7a41..8514f57d5c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
@@ -11,6 +11,7 @@ import (
"fmt"
"io"
"net/http"
+ "net/url"
"sort"
"strings"
"time"
@@ -66,12 +67,7 @@ func NewLogPolicy(o *policy.LogOptions) policy.Policy {
allowedHeaders[strings.ToLower(ah)] = struct{}{}
}
// now do the same thing for query params
- allowedQP := map[string]struct{}{
- "api-version": {},
- }
- for _, qp := range o.AllowedQueryParams {
- allowedQP[strings.ToLower(qp)] = struct{}{}
- }
+ allowedQP := getAllowedQueryParams(o.AllowedQueryParams)
return &logPolicy{
includeBody: o.IncludeBody,
allowedHeaders: allowedHeaders,
@@ -79,6 +75,18 @@ func NewLogPolicy(o *policy.LogOptions) policy.Policy {
}
}
+// getAllowedQueryParams merges the default set of allowed query parameters
+// with a custom set (usually comes from client options).
+func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} {
+ allowedQP := map[string]struct{}{
+ "api-version": {},
+ }
+ for _, qp := range customAllowedQP {
+ allowedQP[strings.ToLower(qp)] = struct{}{}
+ }
+ return allowedQP
+}
+
// logPolicyOpValues is the struct containing the per-operation values
type logPolicyOpValues struct {
try int32
@@ -140,20 +148,24 @@ func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) {
const redactedValue = "REDACTED"
-// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
-// not nil, then these are also written into the Buffer.
-func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
+// getSanitizedURL returns a sanitized string for the provided url.URL
+func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string {
// redact applicable query params
- cpURL := *req.Raw().URL
- qp := cpURL.Query()
+ qp := u.Query()
for k := range qp {
- if _, ok := p.allowedQP[strings.ToLower(k)]; !ok {
+ if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok {
qp.Set(k, redactedValue)
}
}
- cpURL.RawQuery = qp.Encode()
+ u.RawQuery = qp.Encode()
+ return u.String()
+}
+
+// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
+// not nil, then these are also written into the Buffer.
+func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
// Write the request into the buffer.
- fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n")
+ fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n")
p.writeHeader(b, req.Raw().Header)
if resp != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
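For reference, a sketch of configuring the log policy so specific query parameters and headers escape redaction; the names below are illustrative, and "api-version" is always allowed by default.

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newVerboseLogPolicy logs requests and responses; query parameters not listed
// in AllowedQueryParams are written as REDACTED by getSanitizedURL.
func newVerboseLogPolicy() policy.Policy {
	return runtime.NewLogPolicy(&policy.LogOptions{
		AllowedQueryParams: []string{"timeout", "comp"},
		AllowedHeaders:     []string{"x-ms-example"},
		IncludeBody:        false,
	})
}
```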
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go
index db70955b28..360a7f2118 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go
@@ -9,6 +9,7 @@ package runtime
import (
"net/http"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
@@ -21,13 +22,12 @@ func NewRequestIDPolicy() policy.Policy {
}
func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) {
- const requestIdHeader = "x-ms-client-request-id"
- if req.Raw().Header.Get(requestIdHeader) == "" {
+ if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" {
id, err := uuid.New()
if err != nil {
return nil, err
}
- req.Raw().Header.Set(requestIdHeader, id.String())
+ req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String())
}
return req.Next()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
index b330020187..21fcb39682 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
@@ -19,6 +19,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
)
const (
@@ -124,7 +125,8 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
}
if options.TryTimeout == 0 {
- resp, err = req.Next()
+ clone := req.Clone(req.Raw().Context())
+ resp, err = clone.Next()
} else {
// Set the per-try time for this particular retry operation and then Do the operation.
tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout)
@@ -133,7 +135,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
// if the body was already downloaded or there was an error it's safe to cancel the context now
if err != nil {
tryCancel()
- } else if _, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
+ } else if exported.PayloadDownloaded(resp) {
tryCancel()
} else {
// must cancel the context after the body has been read and closed
@@ -146,11 +148,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
log.Writef(log.EventRetryPolicy, "error %v", err)
}
- if err == nil && !HasStatusCode(resp, options.StatusCodes...) {
- // if there is no error and the response code isn't in the list of retry codes then we're done.
- log.Write(log.EventRetryPolicy, "exit due to non-retriable status code")
- return
- } else if ctxErr := req.Raw().Context().Err(); ctxErr != nil {
+ if ctxErr := req.Raw().Context().Err(); ctxErr != nil {
// don't retry if the parent context has been cancelled or its deadline exceeded
err = ctxErr
log.Writef(log.EventRetryPolicy, "abort due to %v", err)
@@ -165,6 +163,19 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
return
}
+ if options.ShouldRetry != nil {
+ // a non-nil ShouldRetry overrides our HTTP status code check
+ if !options.ShouldRetry(resp, err) {
+ // predicate says we shouldn't retry
+ log.Write(log.EventRetryPolicy, "exit due to ShouldRetry")
+ return
+ }
+ } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) {
+ // if there is no error and the response code isn't in the list of retry codes then we're done.
+ log.Write(log.EventRetryPolicy, "exit due to non-retriable status code")
+ return
+ }
+
if try == options.MaxRetries+1 {
// max number of tries has been reached, don't sleep again
log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries)
@@ -198,8 +209,9 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
// WithRetryOptions adds the specified RetryOptions to the parent context.
// Use this to specify custom RetryOptions at the API-call level.
+// Deprecated: use [policy.WithRetryOptions] instead.
func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context {
- return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options)
+ return policy.WithRetryOptions(parent, options)
}
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
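A sketch of the ShouldRetry hook added above; the predicate below (retry on transport errors, 429, and 503) is only an example policy, not a recommendation from this patch.

```go
package main

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// exampleRetryOptions overrides the default status-code check entirely:
// when ShouldRetry is non-nil, StatusCodes is ignored.
func exampleRetryOptions() policy.RetryOptions {
	return policy.RetryOptions{
		MaxRetries: 3,
		ShouldRetry: func(resp *http.Response, err error) bool {
			if err != nil {
				return true // transport-level failure
			}
			return resp.StatusCode == http.StatusTooManyRequests ||
				resp.StatusCode == http.StatusServiceUnavailable
		},
	}
}
```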
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
new file mode 100644
index 0000000000..25266030ba
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
@@ -0,0 +1,39 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// SASCredentialPolicy authorizes requests with a [azcore.SASCredential].
+type SASCredentialPolicy struct {
+ cred *exported.SASCredential
+ header string
+}
+
+// SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy].
+type SASCredentialPolicyOptions struct {
+ // placeholder for future optional values
+}
+
+// NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy].
+// - cred is the [azcore.SASCredential] used to authenticate with the service
+// - header is the name of the HTTP request header in which the shared access signature is placed
+// - options contains optional configuration, pass nil to accept the default values
+func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy {
+ return &SASCredentialPolicy{
+ cred: cred,
+ header: header,
+ }
+}
+
+// Do implements the Do method on the [policy.Policy] interface.
+func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
+ req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred))
+ return req.Next()
+}
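As with the key credential policy, a short sketch; the header name is a placeholder since the header carrying the shared access signature is defined by the target service.

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newSASPolicy places the shared access signature in the named header on each request.
func newSASPolicy(sas string) *runtime.SASCredentialPolicy {
	cred := azcore.NewSASCredential(sas)
	return runtime.NewSASCredentialPolicy(cred, "Authorization", nil)
}
```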
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
index 14c90fecfe..3d029a3d15 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
@@ -23,6 +23,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// FinalStateVia is the enumerated type for the possible final-state-via values.
@@ -75,7 +76,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol
defer resp.Body.Close()
// this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
// ideally the codegen should return an error if the initial response failed and not even create a poller.
- if !pollers.StatusCodeValid(resp) {
+ if !poller.StatusCodeValid(resp) {
return nil, errors.New("the operation failed or was cancelled")
}
@@ -146,7 +147,9 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
opr := options.Handler
// now rehydrate the poller based on the encoded poller type
- if async.CanResume(asJSON) {
+ if opr != nil {
+ log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
+ } else if async.CanResume(asJSON) {
opr, _ = async.New[T](pl, nil, "")
} else if body.CanResume(asJSON) {
opr, _ = body.New[T](pl, nil)
@@ -154,8 +157,6 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
opr, _ = loc.New[T](pl, nil)
} else if op.CanResume(asJSON) {
opr, _ = op.New[T](pl, nil, "")
- } else if opr != nil {
- log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
} else {
return nil, fmt.Errorf("unhandled poller token %s", string(raw))
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
index 98e0071848..b7e6fb26f9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
@@ -15,6 +15,7 @@ import (
"fmt"
"io"
"mime/multipart"
+ "net/url"
"os"
"path"
"reflect"
@@ -44,6 +45,19 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic
return exported.NewRequest(ctx, httpMethod, endpoint)
}
+// EncodeQueryParams will parse and encode any query parameters in the specified URL.
+func EncodeQueryParams(u string) (string, error) {
+ before, after, found := strings.Cut(u, "?")
+ if !found {
+ return u, nil
+ }
+ qp, err := url.ParseQuery(after)
+ if err != nil {
+ return "", err
+ }
+ return before + "?" + qp.Encode(), nil
+}
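A quick sketch of what EncodeQueryParams does to a URL containing unencoded query values (the URL itself is made up):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	// The query string is parsed and re-encoded via url.Values.Encode;
	// the path portion is left untouched.
	s, err := runtime.EncodeQueryParams("https://example.contoso.com/items?filter=name eq 'x'")
	fmt.Println(s, err) // the filter value becomes "name+eq+%27x%27"
}
```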
+
// JoinPaths concatenates multiple URL path segments into one path,
// inserting path separation characters as required. JoinPaths will preserve
// query parameters in the root path
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
index f86ec0b95e..d1f58e9e29 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
@@ -15,15 +15,14 @@ import (
"io"
"net/http"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
)
// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
func Payload(resp *http.Response) ([]byte, error) {
- return exported.Payload(resp)
+ return exported.Payload(resp, nil)
}
// HasStatusCode returns true if the Response's status code is one of the specified values.
@@ -92,15 +91,15 @@ func Drain(resp *http.Response) {
// removeBOM removes any byte-order mark prefix from the payload if present.
func removeBOM(resp *http.Response) error {
- payload, err := Payload(resp)
+ _, err := exported.Payload(resp, &exported.PayloadOptions{
+ BytesModifier: func(b []byte) []byte {
+ // UTF8
+ return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+ },
+ })
if err != nil {
return err
}
- // UTF8
- trimmed := bytes.TrimPrefix(payload, []byte("\xef\xbb\xbf"))
- if len(trimmed) < len(payload) {
- resp.Body.(shared.BytesSetter).Set(trimmed)
- }
return nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go
new file mode 100644
index 0000000000..1c75d771f2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go
@@ -0,0 +1,15 @@
+//go:build !wasm
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "context"
+ "net"
+)
+
+func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+ return dialer.DialContext
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go
new file mode 100644
index 0000000000..3dc9eeecdd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go
@@ -0,0 +1,15 @@
+//go:build (js && wasm) || wasip1
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "context"
+ "net"
+)
+
+func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
index 869bed5118..589d09f2c0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
@@ -18,17 +18,18 @@ var defaultHTTPClient *http.Client
func init() {
defaultTransport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
+ DialContext: defaultTransportDialContext(&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
- }).DialContext,
+ }),
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
- MinVersion: tls.VersionTLS12,
+ MinVersion: tls.VersionTLS12,
+ Renegotiation: tls.RenegotiateFreelyAsClient,
},
}
defaultHTTPClient = &http.Client{
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
index 8563375af0..fbcd48311b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
@@ -20,6 +20,9 @@ type progress struct {
}
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an
+// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReadSeekCloser
+// has its underlying stream closed.
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
return exported.NopCloser(rs)
}
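A tiny usage sketch of the wrapping described above:

```go
package main

import (
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
)

func main() {
	body := strings.NewReader("payload")
	rsc := streaming.NopCloser(body)
	_ = rsc.Close() // no-op; the caller retains control of the underlying stream
}
```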
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go
new file mode 100644
index 0000000000..faa98c9dc5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go
@@ -0,0 +1,9 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package to contains various type-conversion helper functions.
+package to
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go
new file mode 100644
index 0000000000..e0e4817b90
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go
@@ -0,0 +1,21 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package to
+
+// Ptr returns a pointer to the provided value.
+func Ptr[T any](v T) *T {
+ return &v
+}
+
+// SliceOfPtrs returns a slice of *T from the specified values.
+func SliceOfPtrs[T any](vv ...T) []*T {
+ slc := make([]*T, len(vv))
+ for i := range vv {
+ slc[i] = Ptr(vv[i])
+ }
+ return slc
+}
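These generic helpers are handy when populating SDK models whose optional fields are pointers; a small usage sketch:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	name := to.Ptr("example-disk")       // *string
	sizes := to.SliceOfPtrs(32, 64, 128) // []*int
	fmt.Println(*name, *sizes[1])
}
```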
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
index 5877e476f6..7ea119ab30 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -1,5 +1,137 @@
# Release History
+## 1.4.0 (2023-10-10)
+
+### Bugs Fixed
+* `ManagedIdentityCredential` will now retry when IMDS responds 410 or 503
+
+## 1.4.0-beta.5 (2023-09-12)
+
+### Features Added
+* Service principal credentials can request CAE tokens
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.4.0-beta.4
+* Whether `GetToken` requests a CAE token is now determined by `TokenRequestOptions.EnableCAE`. Azure
+ SDK clients which support CAE will set this option automatically. Credentials no longer request CAE
+ tokens by default or observe the environment variable "AZURE_IDENTITY_DISABLE_CP1".
+
+### Bugs Fixed
+* Credential chains such as `DefaultAzureCredential` now try their next credential, if any, when
+ managed identity authentication fails in a Docker Desktop container
+ ([#21417](https://github.com/Azure/azure-sdk-for-go/issues/21417))
+
+## 1.4.0-beta.4 (2023-08-16)
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.3.1 (2023-08-16)
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.4.0-beta.3 (2023-08-08)
+
+### Bugs Fixed
+* One invocation of `AzureCLICredential.GetToken()` and `OnBehalfOfCredential.GetToken()`
+ can no longer make two authentication attempts
+
+## 1.4.0-beta.2 (2023-07-14)
+
+### Other Changes
+* `DefaultAzureCredentialOptions.TenantID` applies to workload identity authentication
+* Upgraded dependencies
+
+## 1.4.0-beta.1 (2023-06-06)
+
+### Other Changes
+* Re-enabled CAE support as in v1.3.0-beta.3
+
+## 1.3.0 (2023-05-09)
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.3.0-beta.5
+* Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate`
+* Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret`
+
+### Other Changes
+* Upgraded to MSAL v1.0.0
+
+## 1.3.0-beta.5 (2023-04-11)
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.3.0-beta.4
+* Moved `NewWorkloadIdentityCredential()` parameters into `WorkloadIdentityCredentialOptions`.
+ The constructor now reads default configuration from environment variables set by the Azure
+ workload identity webhook by default.
+ ([#20478](https://github.com/Azure/azure-sdk-for-go/pull/20478))
+* Removed CAE support. It will return in v1.4.0-beta.1
+ ([#20479](https://github.com/Azure/azure-sdk-for-go/pull/20479))
+
+### Bugs Fixed
+* Fixed an issue in `DefaultAzureCredential` that could cause the managed identity endpoint check to fail in rare circumstances.
+
+## 1.3.0-beta.4 (2023-03-08)
+
+### Features Added
+* Added `WorkloadIdentityCredentialOptions.AdditionallyAllowedTenants` and `.DisableInstanceDiscovery`
+
+### Bugs Fixed
+* Credentials now synchronize within `GetToken()` so a single instance can be shared among goroutines
+ ([#20044](https://github.com/Azure/azure-sdk-for-go/issues/20044))
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.2.2 (2023-03-07)
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.3.0-beta.3 (2023-02-07)
+
+### Features Added
+* By default, credentials set client capability "CP1" to enable support for
+ [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation).
+ This indicates to Azure Active Directory that your application can handle CAE claims challenges.
+ You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true".
+* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login
+ prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599))
+* Service principal and user credentials support ADFS authentication on Azure Stack.
+ Specify "adfs" as the credential's tenant.
+* Applications running in private or disconnected clouds can prevent credentials from
+ requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery`
+ field on credential options.
+* Many credentials can now be configured to authenticate in multiple tenants. The
+ options types for these credentials have an `AdditionallyAllowedTenants` field
+ that specifies additional tenants in which the credential may authenticate.
+
+## 1.3.0-beta.2 (2023-01-10)
+
+### Features Added
+* Added `OnBehalfOfCredential` to support the on-behalf-of flow
+ ([#16642](https://github.com/Azure/azure-sdk-for-go/issues/16642))
+
+### Bugs Fixed
+* `AzureCLICredential` reports token expiration in local time (should be UTC)
+
+### Other Changes
+* `AzureCLICredential` imposes its default timeout only when the `Context`
+ passed to `GetToken()` has no deadline
+* Added `NewCredentialUnavailableError()`. This function constructs an error indicating
+ a credential can't authenticate and an encompassing `ChainedTokenCredential` should
+ try its next credential, if any.
+
+## 1.3.0-beta.1 (2022-12-13)
+
+### Features Added
+* `WorkloadIdentityCredential` and `DefaultAzureCredential` support
+ Workload Identity Federation on Kubernetes. `DefaultAzureCredential`
+ support requires environment variable configuration as set by the
+ Workload Identity webhook.
+ ([#15615](https://github.com/Azure/azure-sdk-for-go/issues/15615))
+
## 1.2.0 (2022-11-08)
### Other Changes
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
index 2df42c813a..da0baa9add 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -55,8 +55,9 @@ an Azure AD access token. See [Credential Types](#credential-types "Credential T
![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg)
1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate.
-2. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
-3. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
+1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity.
+1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
+1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types.
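A minimal sketch of the usual entry point for this chain; the downstream client is elided.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Walks the chain above (environment, workload identity, managed identity,
	// Azure CLI) and authenticates with the first source that succeeds.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = cred // pass to an ARM or data-plane client constructor
}
```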
@@ -128,12 +129,13 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials
|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables
|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource
+|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes
### Authenticating Service Principals
|Credential|Usage
|-|-
-|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.2.0-beta.2#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion
+|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion
|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate
|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index affa91d087..fef099813c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -8,16 +8,17 @@ This troubleshooting guide covers failure investigation techniques, common error
- [Permission issues](#permission-issues)
- [Find relevant information in errors](#find-relevant-information-in-errors)
- [Enable and configure logging](#enable-and-configure-logging)
+- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
+- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues)
+- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues)
- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues)
- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues)
-- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues)
-- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues)
-- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues)
- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues)
- - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity)
- [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity)
- [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity)
-- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
+ - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity)
+- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues)
+- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues)
- [Get additional help](#get-additional-help)
## Handle azidentity errors
@@ -75,12 +76,14 @@ azlog.SetListener(func(event azlog.Event, s string) {
azlog.SetEvents(azidentity.EventAuthentication)
```
+
## Troubleshoot DefaultAzureCredential authentication issues
| Error |Description| Mitigation |
|---|---|---|
-|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
-//
func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
// If already recovering, don't try to Sync
if d.InErrorRecoveryMode(recognizer) {
@@ -247,7 +236,6 @@ func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
//
// @param recognizer the parser instance
// @param e the recognition exception
-//
func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
tokens := recognizer.GetTokenStream()
var input string
@@ -264,7 +252,6 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-//
// This is called by {@link //ReportError} when the exception is an
// {@link InputMisMatchException}.
//
@@ -272,14 +259,12 @@ func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *N
//
// @param recognizer the parser instance
// @param e the recognition exception
-//
func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
}
-//
// This is called by {@link //ReportError} when the exception is a
// {@link FailedPredicateException}.
//
@@ -287,7 +272,6 @@ func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *Inpu
//
// @param recognizer the parser instance
// @param e the recognition exception
-//
func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
msg := "rule " + ruleName + " " + e.message
@@ -310,7 +294,6 @@ func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *Faile
// {@link Parser//NotifyErrorListeners}.
//
// @param recognizer the parser instance
-//
func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -339,7 +322,6 @@ func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
// {@link Parser//NotifyErrorListeners}.
//
// @param recognizer the parser instance
-//
func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
if d.InErrorRecoveryMode(recognizer) {
return
@@ -392,15 +374,14 @@ func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
// derivation:
//
//
-// => ID '=' '(' INT ')' ('+' atom)* ''
+// => ID '=' '(' INT ')' ('+' atom)* ”
// ^
//
//
-// The attempt to Match {@code ')'} will fail when it sees {@code ''} and
-// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==''}
+// The attempt to Match {@code ')'} will fail when it sees {@code ”} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
// is in the set of tokens that can follow the {@code ')'} token reference
// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
-//
func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// SINGLE TOKEN DELETION
MatchedSymbol := d.SingleTokenDeletion(recognizer)
@@ -418,7 +399,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
panic(NewInputMisMatchException(recognizer))
}
-//
// This method implements the single-token insertion inline error recovery
// strategy. It is called by {@link //recoverInline} if the single-token
// deletion strategy fails to recover from the mismatched input. If this
@@ -434,7 +414,6 @@ func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
// @param recognizer the parser instance
// @return {@code true} if single-token insertion is a viable recovery
// strategy for the current mismatched input, otherwise {@code false}
-//
func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
currentSymbolType := recognizer.GetTokenStream().LA(1)
// if current token is consistent with what could come after current
@@ -469,7 +448,6 @@ func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
// @return the successfully Matched {@link Token} instance if single-token
// deletion successfully recovers from the mismatched input, otherwise
// {@code nil}
-//
func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
NextTokenType := recognizer.GetTokenStream().LA(2)
expecting := d.GetExpectedTokens(recognizer)
@@ -507,7 +485,6 @@ func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
// a CommonToken of the appropriate type. The text will be the token.
// If you change what tokens must be created by the lexer,
// override this method to create the appropriate tokens.
-//
func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
currentSymbol := recognizer.GetCurrentToken()
expecting := d.GetExpectedTokens(recognizer)
@@ -546,7 +523,6 @@ func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet
// the token). This is better than forcing you to override a method in
// your token objects because you don't have to go modify your lexer
// so that it creates a new Java type.
-//
func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
if t == nil {
return ""
@@ -578,7 +554,7 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// from within the rule i.e., the FIRST computation done by
// ANTLR stops at the end of a rule.
//
-// EXAMPLE
+// # EXAMPLE
//
// When you find a "no viable alt exception", the input is not
// consistent with any of the alternatives for rule r. The best
@@ -597,7 +573,6 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
// c : ID
// | INT
//
-//
// At each rule invocation, the set of tokens that could follow
// that rule is pushed on a stack. Here are the various
// context-sensitive follow sets:
@@ -660,7 +635,6 @@ func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
//
// Like Grosch I implement context-sensitive FOLLOW sets that are combined
// at run-time upon error to avoid overhead during parsing.
-//
func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
atn := recognizer.GetInterpreter().atn
ctx := recognizer.GetParserRuleContext()
@@ -733,7 +707,6 @@ func NewBailErrorStrategy() *BailErrorStrategy {
// in a {@link ParseCancellationException} so it is not caught by the
// rule func catches. Use {@link Exception//getCause()} to get the
// original {@link RecognitionException}.
-//
func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
context := recognizer.GetParserRuleContext()
for context != nil {
@@ -749,7 +722,6 @@ func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
// Make sure we don't attempt to recover inline if the parser
// successfully recovers, it won't panic an exception.
-//
func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
b.Recover(recognizer, NewInputMisMatchException(recognizer))
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
similarity index 98%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
index 2ef74926ec..3954c13782 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -74,7 +74,6 @@ func (b *BaseRecognitionException) GetInputStream() IntStream {
// If the state number is not known, b method returns -1.
-//
// Gets the set of input symbols which could potentially follow the
// previously Matched symbol at the time this exception was raised.
//
@@ -136,7 +135,6 @@ type NoViableAltException struct {
// to take based upon the remaining input. It tracks the starting token
// of the offending input and also knows where the parser was
// in the various paths when the error occurred. Reported by ReportNoViableAlternative()
-//
func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
if ctx == nil {
@@ -177,7 +175,6 @@ type InputMisMatchException struct {
// This signifies any kind of mismatched input exceptions such as
// when the current input does not Match the expected token.
-//
func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
i := new(InputMisMatchException)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
similarity index 92%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
index 842170c086..bd6ad5efe3 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
similarity index 96%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
index 5ff270f536..a8b889cedb 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
similarity index 82%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
index 438e0ea6e7..4778878bd0 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
similarity index 98%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
index 1e9393adb6..c1e155e818 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -223,6 +223,10 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
return i.toIndexString()
}
+func (i *IntervalSet) GetIntervals() []*Interval {
+ return i.intervals
+}
+
func (i *IntervalSet) toCharString() string {
names := make([]string, len(i.intervals))
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
new file mode 100644
index 0000000000..e5a74f0c6c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
@@ -0,0 +1,198 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "sort"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ return s
+}
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(value)
+
+ for _, v1 := range s.store[kh] {
+ if s.comparator.Equals2(value, v1) {
+ return v1, true
+ }
+ }
+ s.store[kh] = append(s.store[kh], value)
+ s.len++
+ return value, false
+}
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn
+
+ kh := s.comparator.Hash1(key)
+
+ for _, v := range s.store[kh] {
+ if s.comparator.Equals2(key, v) {
+ return v, true
+ }
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn
+
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ for _, v := range e {
+ vs = append(vs, v)
+ }
+ }
+ return vs
+}
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
+ return &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) {
+ kh := m.comparator.Hash1(key)
+
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ m.len++
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+
+ var none V
+ kh := m.comparator.Hash1(key)
+ for _, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ return e.val, true
+ }
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return len(m.store)
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+}
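The new jcollect.go file above replaces the runtime's old untyped hash sets with generic, comparator-driven containers. A minimal usage sketch, assuming it sits inside the same antlr package; intComparator is a hypothetical comparator written only for this sketch, while the patch itself passes comparators such as aConfEqInst:

    package antlr

    import "fmt"

    // intComparator is a toy Comparator[int] used only for this sketch.
    type intComparator struct{}

    func (intComparator) Hash1(o int) int       { return o % 8 } // coarse hash to force bucket collisions
    func (intComparator) Equals2(a, b int) bool { return a == b }

    func sketchJStore() {
        s := NewJStore[int, Comparator[int]](intComparator{})
        _, exists := s.Put(42) // first insert: exists == false
        _, exists = s.Put(42)  // duplicate: the stored value is returned, exists == true
        fmt.Println(exists, s.Contains(42), s.Len()) // prints: true true 1
    }

Values that hash to the same bucket are kept in a slice and disambiguated with Equals2, which is why duplicate Put calls return the already-stored value.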
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
similarity index 98%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
index b04f04572f..6533f05164 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -232,8 +232,6 @@ func (b *BaseLexer) NextToken() Token {
}
return b.token
}
-
- return nil
}
// Instruct the lexer to Skip creating a token for current lexer rule
@@ -342,7 +340,7 @@ func (b *BaseLexer) GetCharIndex() int {
}
// Return the text Matched so far for the current token or any text override.
-//Set the complete text of l token it wipes any previous changes to the text.
+// Set the complete text of l token it wipes any previous changes to the text.
func (b *BaseLexer) GetText() string {
if b.text != "" {
return b.text
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
similarity index 91%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
index 5a325be137..111656c295 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -21,8 +21,8 @@ type LexerAction interface {
getActionType() int
getIsPositionDependent() bool
execute(lexer Lexer)
- hash() int
- equals(other LexerAction) bool
+ Hash() int
+ Equals(other LexerAction) bool
}
type BaseLexerAction struct {
@@ -51,15 +51,14 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
return b.isPositionDependent
}
-func (b *BaseLexerAction) hash() int {
+func (b *BaseLexerAction) Hash() int {
return b.actionType
}
-func (b *BaseLexerAction) equals(other LexerAction) bool {
+func (b *BaseLexerAction) Equals(other LexerAction) bool {
return b == other
}
-//
// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
//
// The {@code Skip} command does not have any parameters, so l action is
@@ -85,7 +84,8 @@ func (l *LexerSkipAction) String() string {
return "skip"
}
-// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+//
// with the assigned type.
type LexerTypeAction struct {
*BaseLexerAction
@@ -104,14 +104,14 @@ func (l *LexerTypeAction) execute(lexer Lexer) {
lexer.SetType(l.thetype)
}
-func (l *LexerTypeAction) hash() int {
+func (l *LexerTypeAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.thetype)
return murmurFinish(h, 2)
}
-func (l *LexerTypeAction) equals(other LexerAction) bool {
+func (l *LexerTypeAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerTypeAction); !ok {
@@ -148,14 +148,14 @@ func (l *LexerPushModeAction) execute(lexer Lexer) {
lexer.PushMode(l.mode)
}
-func (l *LexerPushModeAction) hash() int {
+func (l *LexerPushModeAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.mode)
return murmurFinish(h, 2)
}
-func (l *LexerPushModeAction) equals(other LexerAction) bool {
+func (l *LexerPushModeAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerPushModeAction); !ok {
@@ -245,14 +245,14 @@ func (l *LexerModeAction) execute(lexer Lexer) {
lexer.SetMode(l.mode)
}
-func (l *LexerModeAction) hash() int {
+func (l *LexerModeAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.mode)
return murmurFinish(h, 2)
}
-func (l *LexerModeAction) equals(other LexerAction) bool {
+func (l *LexerModeAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerModeAction); !ok {
@@ -303,7 +303,7 @@ func (l *LexerCustomAction) execute(lexer Lexer) {
lexer.Action(nil, l.ruleIndex, l.actionIndex)
}
-func (l *LexerCustomAction) hash() int {
+func (l *LexerCustomAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.ruleIndex)
@@ -311,13 +311,14 @@ func (l *LexerCustomAction) hash() int {
return murmurFinish(h, 3)
}
-func (l *LexerCustomAction) equals(other LexerAction) bool {
+func (l *LexerCustomAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerCustomAction); !ok {
return false
} else {
- return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex &&
+ l.actionIndex == other.(*LexerCustomAction).actionIndex
}
}
@@ -344,14 +345,14 @@ func (l *LexerChannelAction) execute(lexer Lexer) {
lexer.SetChannel(l.channel)
}
-func (l *LexerChannelAction) hash() int {
+func (l *LexerChannelAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.actionType)
h = murmurUpdate(h, l.channel)
return murmurFinish(h, 2)
}
-func (l *LexerChannelAction) equals(other LexerAction) bool {
+func (l *LexerChannelAction) Equals(other LexerAction) bool {
if l == other {
return true
} else if _, ok := other.(*LexerChannelAction); !ok {
@@ -412,10 +413,10 @@ func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
l.lexerAction.execute(lexer)
}
-func (l *LexerIndexedCustomAction) hash() int {
+func (l *LexerIndexedCustomAction) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, l.offset)
- h = murmurUpdate(h, l.lexerAction.hash())
+ h = murmurUpdate(h, l.lexerAction.Hash())
return murmurFinish(h, 2)
}
@@ -425,6 +426,7 @@ func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
} else if _, ok := other.(*LexerIndexedCustomAction); !ok {
return false
} else {
- return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
+ return l.offset == other.(*LexerIndexedCustomAction).offset &&
+ l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction)
}
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
similarity index 88%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
index 056941dd6e..be1ba7a7e3 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
@@ -1,9 +1,11 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
+import "golang.org/x/exp/slices"
+
// Represents an executor for a sequence of lexer actions which traversed during
// the Matching operation of a lexer rule (token).
//
@@ -12,8 +14,8 @@ package antlr
// not cause bloating of the {@link DFA} created for the lexer.
type LexerActionExecutor struct {
- lexerActions []LexerAction
- cachedHash int
+ lexerActions []LexerAction
+ cachedHash int
}
func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
@@ -30,7 +32,7 @@ func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
// of the performance-critical {@link LexerATNConfig//hashCode} operation.
l.cachedHash = murmurInit(57)
for _, a := range lexerActions {
- l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
+ l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
}
return l
@@ -151,14 +153,17 @@ func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex
}
}
-func (l *LexerActionExecutor) hash() int {
+func (l *LexerActionExecutor) Hash() int {
if l == nil {
+ // TODO: Why is this here? l should not be nil
return 61
}
+
+ // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
return l.cachedHash
}
-func (l *LexerActionExecutor) equals(other interface{}) bool {
+func (l *LexerActionExecutor) Equals(other interface{}) bool {
if l == other {
return true
}
@@ -169,5 +174,13 @@ func (l *LexerActionExecutor) equals(other interface{}) bool {
if othert == nil {
return false
}
- return l.cachedHash == othert.cachedHash && &l.lexerActions == &othert.lexerActions
+ if l.cachedHash != othert.cachedHash {
+ return false
+ }
+ if len(l.lexerActions) != len(othert.lexerActions) {
+ return false
+ }
+ return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
+ return i.Equals(j)
+ })
}
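The Equals rewrite above fixes a real bug: the old code compared the addresses of the two lexerActions slice headers (&l.lexerActions == &othert.lexerActions), which is effectively never true for distinct executors, while the new code compares the actions element by element. A small self-contained sketch of the slices.EqualFunc idiom it relies on, using illustrative values only:

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices"
    )

    func main() {
        a := []string{"skip", "popMode"}
        b := []string{"skip", "popMode"}
        // The slice headers differ, but the contents match element by element,
        // which is the comparison the patched LexerActionExecutor.Equals needs.
        same := slices.EqualFunc(a, b, func(x, y string) bool { return x == y })
        fmt.Println(same) // true
    }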
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
similarity index 98%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
index dc05153ea4..c573b75210 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -591,19 +591,24 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
- hash := proposed.hash()
dfa := l.decisionToDFA[l.mode]
l.atn.stateMu.Lock()
defer l.atn.stateMu.Unlock()
- existing, ok := dfa.getState(hash)
- if ok {
+ existing, present := dfa.states.Get(proposed)
+ if present {
+
+ // This state was already present, so just return it.
+ //
proposed = existing
} else {
- proposed.stateNumber = dfa.numStates()
+
+ // We need to add the new state
+ //
+ proposed.stateNumber = dfa.states.Len()
configs.SetReadOnly(true)
proposed.configs = configs
- dfa.setState(hash, proposed)
+ dfa.states.Put(proposed)
}
if !suppressEdge {
dfa.setS0(proposed)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
similarity index 87%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
index 6ffb37de69..76689615a6 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -14,14 +14,15 @@ func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
return la
}
-//* Special value added to the lookahead sets to indicate that we hit
-// a predicate during analysis if {@code seeThruPreds==false}.
-///
+// - Special value added to the lookahead sets to indicate that we hit
+// a predicate during analysis if {@code seeThruPreds==false}.
+//
+// /
const (
LL1AnalyzerHitPred = TokenInvalidType
)
-//*
+// *
// Calculates the SLL(1) expected lookahead set for each outgoing transition
// of an {@link ATNState}. The returned array has one element for each
// outgoing transition in {@code s}. If the closure from transition
@@ -38,7 +39,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
look := make([]*IntervalSet, count)
for alt := 0; alt < count; alt++ {
look[alt] = NewIntervalSet()
- lookBusy := newArray2DHashSet(nil, nil)
+ lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
seeThruPreds := false // fail to get lookahead upon pred
la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
// Wipe out lookahead for la alternative if we found nothing
@@ -50,7 +51,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
return look
}
-//*
+// *
// Compute set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
//
@@ -67,7 +68,7 @@ func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
//
// @return The set of tokens that can follow {@code s} in the ATN in the
// specified {@code ctx}.
-///
+// /
func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
r := NewIntervalSet()
seeThruPreds := true // ignore preds get all lookahead
@@ -75,7 +76,7 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
if ctx != nil {
lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
}
- la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
+ la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
return r
}
@@ -109,14 +110,14 @@ func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet
// outermost context is reached. This parameter has no effect if {@code ctx}
// is {@code nil}.
-func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
returnState := la.atn.states[ctx.getReturnState(i)]
la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
}
-func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
c := NewBaseATNConfig6(s, 0, ctx)
@@ -124,8 +125,11 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
return
}
- lookBusy.Add(c)
+ _, present := lookBusy.Put(c)
+ if present {
+ return
+ }
if s == stopState {
if ctx == nil {
look.addOne(TokenEpsilon)
@@ -198,7 +202,7 @@ func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look
}
}
-func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
similarity index 99%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
index 2ab2f56052..d26bf06392 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -91,7 +91,6 @@ func NewBaseParser(input TokenStream) *BaseParser {
// bypass alternatives.
//
// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
-//
var bypassAltsAtnCache = make(map[string]int)
// reset the parser's state//
@@ -230,7 +229,6 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener {
// @param listener the listener to add
//
// @panics nilPointerException if {@code} listener is {@code nil}
-//
func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
if listener == nil {
panic("listener")
@@ -241,13 +239,11 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
p.parseListeners = append(p.parseListeners, listener)
}
-//
// Remove {@code listener} from the list of parse listeners.
//
// If {@code listener} is {@code nil} or has not been added as a parse
// listener, p.method does nothing.
// @param listener the listener to remove
-//
func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
if p.parseListeners != nil {
@@ -289,11 +285,9 @@ func (p *BaseParser) TriggerEnterRuleEvent() {
}
}
-//
// Notify any parse listeners of an exit rule event.
//
// @see //addParseListener
-//
func (p *BaseParser) TriggerExitRuleEvent() {
if p.parseListeners != nil {
// reverse order walk of listeners
@@ -330,7 +324,6 @@ func (p *BaseParser) setTokenFactory(factory TokenFactory) {
//
// @panics UnsupportedOperationException if the current parser does not
// implement the {@link //getSerializedATN()} method.
-//
func (p *BaseParser) GetATNWithBypassAlts() {
// TODO
@@ -402,7 +395,6 @@ func (p *BaseParser) SetTokenStream(input TokenStream) {
// Match needs to return the current input symbol, which gets put
// into the label for the associated token ref e.g., x=ID.
-//
func (p *BaseParser) GetCurrentToken() Token {
return p.input.LT(1)
}
@@ -624,7 +616,6 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool {
// respectively.
//
// @see ATN//getExpectedTokens(int, RuleContext)
-//
func (p *BaseParser) GetExpectedTokens() *IntervalSet {
return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
}
@@ -686,7 +677,7 @@ func (p *BaseParser) GetDFAStrings() string {
func (p *BaseParser) DumpDFA() {
seenOne := false
for _, dfa := range p.Interpreter.decisionToDFA {
- if dfa.numStates() > 0 {
+ if dfa.states.Len() > 0 {
if seenOne {
fmt.Println()
}
@@ -703,7 +694,6 @@ func (p *BaseParser) GetSourceName() string {
// During a parse is sometimes useful to listen in on the rule entry and exit
// events as well as token Matches. p.is for quick and dirty debugging.
-//
func (p *BaseParser) SetTrace(trace *TraceListener) {
if trace == nil {
p.RemoveParseListener(p.tracer)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
similarity index 94%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
index 888d512975..8bcc46a0d9 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -11,11 +11,11 @@ import (
)
var (
- ParserATNSimulatorDebug = false
- ParserATNSimulatorListATNDecisions = false
- ParserATNSimulatorDFADebug = false
- ParserATNSimulatorRetryDebug = false
- TurnOffLRLoopEntryBranchOpt = false
+ ParserATNSimulatorDebug = false
+ ParserATNSimulatorTraceATNSim = false
+ ParserATNSimulatorDFADebug = false
+ ParserATNSimulatorRetryDebug = false
+ TurnOffLRLoopEntryBranchOpt = false
)
type ParserATNSimulator struct {
@@ -70,8 +70,8 @@ func (p *ParserATNSimulator) reset() {
}
func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
- fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
" exec LA(1)==" + p.getLookaheadName(input) +
" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
strconv.Itoa(input.LT(1).GetColumn()))
@@ -111,15 +111,15 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
if s0 == nil {
if outerContext == nil {
- outerContext = RuleContextEmpty
+ outerContext = ParserRuleContextEmpty
}
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ if ParserATNSimulatorDebug {
fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
" exec LA(1)==" + p.getLookaheadName(input) +
", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
}
fullCtx := false
- s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx)
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
p.atn.stateMu.Lock()
if dfa.getPrecedenceDfa() {
@@ -174,17 +174,18 @@ func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, ou
// Reporting insufficient predicates
// cover these cases:
-// dead end
-// single alt
-// single alt + preds
-// conflict
-// conflict + preds
//
+// dead end
+// single alt
+// single alt + preds
+// conflict
+// conflict + preds
func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
" line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
}
@@ -277,8 +278,6 @@ func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream,
t = input.LA(1)
}
}
-
- panic("Should not have reached p state")
}
// Get an existing target state for an edge in the DFA. If the target state
@@ -384,7 +383,7 @@ func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState
// comes back with reach.uniqueAlt set to a valid alt
func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
fmt.Println("execATNWithFullContext " + s0.String())
}
@@ -492,9 +491,6 @@ func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 AT
}
func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
- if ParserATNSimulatorDebug {
- fmt.Println("in computeReachSet, starting closure: " + closure.String())
- }
if p.mergeCache == nil {
p.mergeCache = NewDoubleDict()
}
@@ -570,7 +566,7 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
//
if reach == nil {
reach = NewBaseATNConfigSet(fullCtx)
- closureBusy := newArray2DHashSet(nil, nil)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
treatEOFAsEpsilon := t == TokenEOF
amount := len(intermediate.configs)
for k := 0; k < amount; k++ {
@@ -610,6 +606,11 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
reach.Add(skippedStopStates[l], p.mergeCache)
}
}
+
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
if len(reach.GetItems()) == 0 {
return nil
}
@@ -617,7 +618,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
return reach
}
-//
// Return a configuration set containing only the configurations from
// {@code configs} which are in a {@link RuleStopState}. If all
// configurations in {@code configs} are already in a rule stop state, p
@@ -636,7 +636,6 @@ func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCt
// @return {@code configs} if all configurations in {@code configs} are in a
// rule stop state, otherwise return a Newconfiguration set containing only
// the configurations from {@code configs} which are in a rule stop state
-//
func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
if PredictionModeallConfigsInRuleStopStates(configs) {
return configs
@@ -662,16 +661,20 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full
// always at least the implicit call to start rule
initialContext := predictionContextFromRuleContext(p.atn, ctx)
configs := NewBaseATNConfigSet(fullCtx)
+ if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
for i := 0; i < len(a.GetTransitions()); i++ {
target := a.GetTransitions()[i].getTarget()
c := NewBaseATNConfig6(target, i+1, initialContext)
- closureBusy := newArray2DHashSet(nil, nil)
+ closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
p.closure(c, configs, closureBusy, true, fullCtx, false)
}
return configs
}
-//
// This method transforms the start state computed by
// {@link //computeStartState} to the special start state used by a
// precedence DFA for a particular precedence value. The transformation
@@ -726,7 +729,6 @@ func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, full
// @return The transformed configuration set representing the start state
// for a precedence DFA at a particular precedence level (determined by
// calling {@link Parser//getPrecedence}).
-//
func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
statesFromAlt1 := make(map[int]PredictionContext)
@@ -760,7 +762,7 @@ func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConf
// (basically a graph subtraction algorithm).
if !config.getPrecedenceFilterSuppressed() {
context := statesFromAlt1[config.GetState().GetStateNumber()]
- if context != nil && context.equals(config.GetContext()) {
+ if context != nil && context.Equals(config.GetContext()) {
// eliminated
continue
}
@@ -824,7 +826,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
return pairs
}
-//
// This method is used to improve the localization of error messages by
// choosing an alternative rather than panicing a
// {@link NoViableAltException} in particular prediction scenarios where the
@@ -869,7 +870,6 @@ func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPre
// @return The value to return from {@link //AdaptivePredict}, or
// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
// identified and {@link //AdaptivePredict} should Report an error instead.
-//
func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
semValidConfigs := cfgs[0]
@@ -938,11 +938,11 @@ func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigS
}
// Look through a list of predicate/alt pairs, returning alts for the
-// pairs that win. A {@code NONE} predicate indicates an alt containing an
-// unpredicated config which behaves as "always true." If !complete
-// then we stop at the first predicate that evaluates to true. This
-// includes pairs with nil predicates.
//
+// pairs that win. A {@code NONE} predicate indicates an alt containing an
+// unpredicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
predictions := NewBitSet()
for i := 0; i < len(predPredictions); i++ {
@@ -972,16 +972,16 @@ func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPredicti
return predictions
}
-func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
initialDepth := 0
p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
fullCtx, initialDepth, treatEOFAsEpsilon)
}
-func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if ParserATNSimulatorDebug {
+func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if ParserATNSimulatorTraceATNSim {
fmt.Println("closure(" + config.String() + ")")
- fmt.Println("configs(" + configs.String() + ")")
+ //fmt.Println("configs(" + configs.String() + ")")
if config.GetReachesIntoOuterContext() > 50 {
panic("problem")
}
@@ -1031,7 +1031,7 @@ func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs
}
// Do the actual work of walking epsilon edges//
-func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
state := config.GetState()
// optimization
if !state.GetEpsilonOnlyTransitions() {
@@ -1066,7 +1066,8 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet,
c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
- if closureBusy.Add(c) != c {
+ _, present := closureBusy.Put(c)
+ if present {
// avoid infinite recursion for right-recursive rules
continue
}
@@ -1077,9 +1078,13 @@ func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet,
fmt.Println("dips into outer ctx: " + c.String())
}
} else {
- if !t.getIsEpsilon() && closureBusy.Add(c) != c {
- // avoid infinite recursion for EOF* and EOF+
- continue
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
}
if _, ok := t.(*RuleTransition); ok {
// latch when newDepth goes negative - once we step out of the entry context we can't return
@@ -1104,7 +1109,16 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC
// left-recursion elimination. For efficiency, also check if
// the context has an empty stack case. If so, it would mean
// global FOLLOW so we can't perform optimization
- if startLoop, ok := _p.(StarLoopEntryState); !ok || !startLoop.precedenceRuleDecision || config.GetContext().isEmpty() || config.GetContext().hasEmptyPath() {
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
return false
}
@@ -1117,8 +1131,8 @@ func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNC
return false
}
}
-
- decisionStartState := _p.(BlockStartState).GetTransitions()[0].getTarget().(BlockStartState)
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
blockEndStateNum := decisionStartState.getEndState().stateNumber
blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
@@ -1355,13 +1369,12 @@ func (p *ParserATNSimulator) GetTokenName(t int) string {
return "EOF"
}
- if p.parser != nil && p.parser.GetLiteralNames() != nil {
- if t >= len(p.parser.GetLiteralNames()) {
- fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ","))
- // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect
- } else {
- return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
- }
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+ return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
}
return strconv.Itoa(t)
@@ -1372,9 +1385,9 @@ func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
}
// Used for debugging in AdaptivePredict around execATN but I cut
-// it out for clarity now that alg. works well. We can leave p
-// "dead" code for a bit.
//
+// it out for clarity now that alg. works well. We can leave p
+// "dead" code for a bit.
func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
panic("Not implemented")
@@ -1421,7 +1434,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
return alt
}
-//
// Add an edge to the DFA, if possible. This method calls
// {@link //addDFAState} to ensure the {@code to} state is present in the
// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
@@ -1440,7 +1452,6 @@ func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
// @return If {@code to} is {@code nil}, p method returns {@code nil}
// otherwise p method returns the result of calling {@link //addDFAState}
// on {@code to}
-//
func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
if ParserATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
@@ -1472,7 +1483,6 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
return to
}
-//
// Add state {@code D} to the DFA if it is not already present, and return
// the actual instance stored in the DFA. If a state equivalent to {@code D}
// is already in the DFA, the existing state is returned. Otherwise p
@@ -1486,25 +1496,30 @@ func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFA
// @return The state stored in the DFA. This will be either the existing
// state if {@code D} is already in the DFA, or {@code D} itself if the
// state was not already present.
-//
func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
if d == ATNSimulatorError {
return d
}
- hash := d.hash()
- existing, ok := dfa.getState(hash)
- if ok {
+ existing, present := dfa.states.Get(d)
+ if present {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Print("addDFAState " + d.String() + " exists")
+ }
return existing
}
- d.stateNumber = dfa.numStates()
+
+ // The state was not present, so update it with configs
+ //
+ d.stateNumber = dfa.states.Len()
if !d.configs.ReadOnly() {
d.configs.OptimizeConfigs(p.BaseATNSimulator)
d.configs.SetReadOnly(true)
}
- dfa.setState(hash, d)
- if ParserATNSimulatorDebug {
- fmt.Println("adding NewDFA state: " + d.String())
+ dfa.states.Put(d)
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("addDFAState new " + d.String())
}
+
return d
}
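addDFAState above (like its lexer counterpart earlier in the patch) now deduplicates DFA states through the JStore-backed dfa.states collection rather than a hash-keyed map. A compressed sketch of the resulting get-or-insert idiom, using the names from the patch and assuming the surrounding antlr package; addOrReuse is a hypothetical helper, and the locking and read-only handling from the patch are omitted:

    // Sketch, inside package antlr: reuse an equal DFA state if one is
    // already stored, otherwise number and store the proposed one.
    func addOrReuse(dfa *DFA, proposed *DFAState) *DFAState {
        if existing, present := dfa.states.Get(proposed); present {
            return existing
        }
        proposed.stateNumber = dfa.states.Len()
        dfa.states.Put(proposed)
        return proposed
    }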
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
similarity index 98%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
index 49cd10c5ff..1c8cee7479 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -340,7 +340,7 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
return s
}
-var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
type InterpreterRuleContext interface {
ParserRuleContext
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
similarity index 81%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
index 9fdfd52b26..ba62af3610 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
@@ -1,10 +1,12 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
package antlr
import (
+ "fmt"
+ "golang.org/x/exp/slices"
"strconv"
)
@@ -26,10 +28,10 @@ var (
)
type PredictionContext interface {
- hash() int
+ Hash() int
+ Equals(interface{}) bool
GetParent(int) PredictionContext
getReturnState(int) int
- equals(PredictionContext) bool
length() int
isEmpty() bool
hasEmptyPath() bool
@@ -53,7 +55,7 @@ func (b *BasePredictionContext) isEmpty() bool {
func calculateHash(parent PredictionContext, returnState int) int {
h := murmurInit(1)
- h = murmurUpdate(h, parent.hash())
+ h = murmurUpdate(h, parent.Hash())
h = murmurUpdate(h, returnState)
return murmurFinish(h, 2)
}
@@ -86,7 +88,6 @@ func NewPredictionContextCache() *PredictionContextCache {
// Add a context to the cache and return it. If the context already exists,
// return that one instead and do not add a Newcontext to the cache.
// Protect shared cache from unsafe thread access.
-//
func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
if ctx == BasePredictionContextEMPTY {
return BasePredictionContextEMPTY
@@ -160,28 +161,28 @@ func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
return b.returnState == BasePredictionContextEmptyReturnState
}
-func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
+func (b *BaseSingletonPredictionContext) Hash() int {
+ return b.cachedHash
+}
+
+func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
if b == other {
return true
- } else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
+ }
+ if _, ok := other.(*BaseSingletonPredictionContext); !ok {
return false
- } else if b.hash() != other.hash() {
- return false // can't be same if hash is different
}
otherP := other.(*BaseSingletonPredictionContext)
- if b.returnState != other.getReturnState(0) {
+ if b.returnState != otherP.getReturnState(0) {
return false
- } else if b.parentCtx == nil {
+ }
+ if b.parentCtx == nil {
return otherP.parentCtx == nil
}
- return b.parentCtx.equals(otherP.parentCtx)
-}
-
-func (b *BaseSingletonPredictionContext) hash() int {
- return b.cachedHash
+ return b.parentCtx.Equals(otherP.parentCtx)
}
func (b *BaseSingletonPredictionContext) String() string {
@@ -215,7 +216,7 @@ func NewEmptyPredictionContext() *EmptyPredictionContext {
p := new(EmptyPredictionContext)
p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
-
+ p.cachedHash = calculateEmptyHash()
return p
}
@@ -231,7 +232,11 @@ func (e *EmptyPredictionContext) getReturnState(index int) int {
return e.returnState
}
-func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
+func (e *EmptyPredictionContext) Hash() int {
+ return e.cachedHash
+}
+
+func (e *EmptyPredictionContext) Equals(other interface{}) bool {
return e == other
}
@@ -254,7 +259,7 @@ func NewArrayPredictionContext(parents []PredictionContext, returnStates []int)
hash := murmurInit(1)
for _, parent := range parents {
- hash = murmurUpdate(hash, parent.hash())
+ hash = murmurUpdate(hash, parent.Hash())
}
for _, returnState := range returnStates {
@@ -298,18 +303,31 @@ func (a *ArrayPredictionContext) getReturnState(index int) int {
return a.returnStates[index]
}
-func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
- if _, ok := other.(*ArrayPredictionContext); !ok {
+// Equals is the default comparison function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Equals(o interface{}) bool {
+ if a == o {
+ return true
+ }
+ other, ok := o.(*ArrayPredictionContext)
+ if !ok {
return false
- } else if a.cachedHash != other.hash() {
+ }
+ if a.cachedHash != other.Hash() {
return false // can't be same if hash is different
- } else {
- otherP := other.(*ArrayPredictionContext)
- return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
}
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(a.returnStates, other.returnStates) &&
+ slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
+ return x.Equals(y)
+ })
}
-func (a *ArrayPredictionContext) hash() int {
+// Hash is the default hash function for ArrayPredictionContext when no specialized
+// implementation is needed for a collection
+func (a *ArrayPredictionContext) Hash() int {
return a.BasePredictionContext.cachedHash
}
@@ -343,11 +361,11 @@ func (a *ArrayPredictionContext) String() string {
// /
func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
if outerContext == nil {
- outerContext = RuleContextEmpty
+ outerContext = ParserRuleContextEmpty
}
// if we are in RuleContext of start rule, s, then BasePredictionContext
// is EMPTY. Nobody called us. (if we are empty, return empty)
- if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
return BasePredictionContextEMPTY
}
// If we have a parent, convert it to a BasePredictionContext graph
@@ -359,11 +377,20 @@ func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) Predicti
}
func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- // share same graph if both same
- if a == b {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
return a
}
+ // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
+ // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
+ // from it.
+ // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
+ // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
+ // either of them.
+
ac, ok1 := a.(*BaseSingletonPredictionContext)
bc, ok2 := b.(*BaseSingletonPredictionContext)
@@ -380,17 +407,32 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
return b
}
}
- // convert singleton so both are arrays to normalize
- if _, ok := a.(*BaseSingletonPredictionContext); ok {
- a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+
+ // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters
+ // here.
+ //
+ // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
+
+ var arp, arb *ArrayPredictionContext
+ var ok bool
+ if arp, ok = a.(*ArrayPredictionContext); ok {
+ } else if _, ok = a.(*BaseSingletonPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+ } else if _, ok = a.(*EmptyPredictionContext); ok {
+ arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
}
- if _, ok := b.(*BaseSingletonPredictionContext); ok {
- b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+
+ if arb, ok = b.(*ArrayPredictionContext); ok {
+ } else if _, ok = b.(*BaseSingletonPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+ } else if _, ok = b.(*EmptyPredictionContext); ok {
+ arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
}
- return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
+
+ // Both arp and arb
+ return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
}
-//
// Merge two {@link SingletonBasePredictionContext} instances.
//
// Stack tops equal, parents merge is same return left graph.
@@ -423,11 +465,11 @@ func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict)
// /
func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
- previous := mergeCache.Get(a.hash(), b.hash())
+ previous := mergeCache.Get(a.Hash(), b.Hash())
if previous != nil {
return previous.(PredictionContext)
}
- previous = mergeCache.Get(b.hash(), a.hash())
+ previous = mergeCache.Get(b.Hash(), a.Hash())
if previous != nil {
return previous.(PredictionContext)
}
@@ -436,7 +478,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
rootMerge := mergeRoot(a, b, rootIsWildcard)
if rootMerge != nil {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), rootMerge)
+ mergeCache.set(a.Hash(), b.Hash(), rootMerge)
}
return rootMerge
}
@@ -456,7 +498,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
// Newjoined parent so create Newsingleton pointing to it, a'
spc := SingletonBasePredictionContextCreate(parent, a.returnState)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), spc)
+ mergeCache.set(a.Hash(), b.Hash(), spc)
}
return spc
}
@@ -478,7 +520,7 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
parents := []PredictionContext{singleParent, singleParent}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), apc)
+ mergeCache.set(a.Hash(), b.Hash(), apc)
}
return apc
}
@@ -494,12 +536,11 @@ func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool,
}
apc := NewArrayPredictionContext(parents, payloads)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), apc)
+ mergeCache.set(a.Hash(), b.Hash(), apc)
}
return apc
}
-//
// Handle case where at least one of {@code a} or {@code b} is
// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
// to represent {@link //EMPTY}.
@@ -561,7 +602,6 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC
return nil
}
-//
// Merge two {@link ArrayBasePredictionContext} instances.
//
//
// Different tops, different parents.
@@ -583,12 +623,18 @@ func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionC
// /
func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
if mergeCache != nil {
- previous := mergeCache.Get(a.hash(), b.hash())
+ previous := mergeCache.Get(a.Hash(), b.Hash())
if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
return previous.(PredictionContext)
}
- previous = mergeCache.Get(b.hash(), a.hash())
+ previous = mergeCache.Get(b.Hash(), a.Hash())
if previous != nil {
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
return previous.(PredictionContext)
}
}
@@ -608,7 +654,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
payload := a.returnStates[i]
// $+$ = $
bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
- axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
+ axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax
// ->
// ax
if bothDollars || axAX {
@@ -651,7 +697,7 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
if k == 1 { // for just one merged element, return singleton top
pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), pc)
+ mergeCache.set(a.Hash(), b.Hash(), pc)
}
return pc
}
@@ -663,27 +709,36 @@ func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *
// if we created same array as a or b, return that instead
// TODO: track whether this is possible above during merge sort for speed
+ // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
if M == a {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), a)
+ mergeCache.set(a.Hash(), b.Hash(), a)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
}
return a
}
if M == b {
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), b)
+ mergeCache.set(a.Hash(), b.Hash(), b)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
}
return b
}
combineCommonParents(mergedParents)
if mergeCache != nil {
- mergeCache.set(a.hash(), b.hash(), M)
+ mergeCache.set(a.Hash(), b.Hash(), M)
+ }
+ if ParserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
}
return M
}
-//
// Make pass over all M {@code parents} merge any {@code equals()}
// ones.
// /
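
The singleton and array merge hunks above all share one caching pattern: a merge result is memoized under the pair of operand hashes and probed in both (a,b) and (b,a) order. A minimal, self-contained sketch of that pattern follows; the memo type here is a simplified stand-in, not the vendored DoubleDict.

package main

import "fmt"

// pair keys a merge result by the hashes of its two operands.
type pair struct{ h1, h2 int }

// memo is a simplified stand-in for the DoubleDict merge cache.
type memo map[pair]string

// get probes the cache in both (a,b) and (b,a) order, mirroring how
// mergeSingletons and mergeArrays look up previous results above.
func (m memo) get(aHash, bHash int) (string, bool) {
	if v, ok := m[pair{aHash, bHash}]; ok {
		return v, true
	}
	if v, ok := m[pair{bHash, aHash}]; ok {
		return v, true
	}
	return "", false
}

func (m memo) set(aHash, bHash int, v string) { m[pair{aHash, bHash}] = v }

func main() {
	c := memo{}
	c.set(17, 42, "merged(a,b)")
	if v, ok := c.get(42, 17); ok { // reversed order still hits
		fmt.Println(v)
	}
}
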
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
similarity index 95%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
index 15718f912b..7b9b72fab1 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -70,7 +70,6 @@ const (
PredictionModeLLExactAmbigDetection = 2
)
-//
// Computes the SLL prediction termination condition.
//
//
@@ -108,9 +107,9 @@ const (
// The single-alt-state thing lets prediction continue upon rules like
// (otherwise, it would admit defeat too soon):
//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) '' }
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
//
-// When the ATN simulation reaches the state before {@code ''}, it has a
+//
+// When the ATN simulation reaches the state before {@code ”}, it has a
// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
// processing this node because alternative to has another way to continue,
@@ -152,16 +151,15 @@ const (
//
//
// Before testing these configurations against others, we have to merge
// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
+// For example, we test {@code (x+x')==x”} when looking for conflicts in
// the following configurations.
//
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
//
// If the configuration set has predicates (as indicated by
// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
// the configurations to strip out all of the predicates so that a standard
// {@link ATNConfigSet} will merge everything ignoring predicates.
-//
func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
// Configs in rule stop states indicate reaching the end of the decision
// rule (local context) or end of start rule (full context). If all
@@ -229,7 +227,6 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
return true
}
-//
// Full LL prediction termination.
//
// Can we stop looking ahead during ATN simulation or is there some
@@ -334,7 +331,7 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
//
//
//
// {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
+// {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set
// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
// {@code {1}} => stop and predict 1
//
@@ -369,31 +366,26 @@ func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
// two or one and three so we keep going. We can only stop prediction when
// we need exact ambiguity detection when the sets look like
// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
-//
func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
return PredictionModegetSingleViableAlt(altsets)
}
-//
// Determines if every alternative subset in {@code altsets} contains more
// than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every {@link BitSet} in {@code altsets} has
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-//
func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
return !PredictionModehasNonConflictingAltSet(altsets)
}
-//
// Determines if any single alternative subset in {@code altsets} contains
// exactly one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
-//
func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
@@ -404,14 +396,12 @@ func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
return false
}
-//
// Determines if any single alternative subset in {@code altsets} contains
// more than one alternative.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if {@code altsets} contains a {@link BitSet} with
// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-//
func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
for i := 0; i < len(altsets); i++ {
alts := altsets[i]
@@ -422,13 +412,11 @@ func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
return false
}
-//
// Determines if every alternative subset in {@code altsets} is equivalent.
//
// @param altsets a collection of alternative subsets
// @return {@code true} if every member of {@code altsets} is equal to the
// others, otherwise {@code false}
-//
func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
var first *BitSet
@@ -444,13 +432,11 @@ func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
return true
}
-//
// Returns the unique alternative predicted by all alternative subsets in
// {@code altsets}. If no such alternative exists, this method returns
// {@link ATN//INVALID_ALT_NUMBER}.
//
// @param altsets a collection of alternative subsets
-//
func PredictionModegetUniqueAlt(altsets []*BitSet) int {
all := PredictionModeGetAlts(altsets)
if all.length() == 1 {
@@ -466,7 +452,6 @@ func PredictionModegetUniqueAlt(altsets []*BitSet) int {
//
// @param altsets a collection of alternative subsets
// @return the set of represented alternatives in {@code altsets}
-//
func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
all := NewBitSet()
for _, alts := range altsets {
@@ -475,44 +460,35 @@ func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
return all
}
-//
-// This func gets the conflicting alt subsets from a configuration set.
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
// For each configuration {@code c} in {@code configs}:
//
//
// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
// alt and not pred
//
-//
func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
- configToAlts := make(map[int]*BitSet)
+ configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
for _, c := range configs.GetItems() {
- key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
- alts, ok := configToAlts[key]
+ alts, ok := configToAlts.Get(c)
if !ok {
alts = NewBitSet()
- configToAlts[key] = alts
+ configToAlts.Put(c, alts)
}
alts.add(c.GetAlt())
}
- values := make([]*BitSet, 0, 10)
- for _, v := range configToAlts {
- values = append(values, v)
- }
- return values
+ return configToAlts.Values()
}
-//
-// Get a map from state to alt subset from a configuration set. For each
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
// configuration {@code c} in {@code configs}:
//
//
// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
//
-//
func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
m := NewAltDict()
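
The getConflictingAltSubsets change above swaps a hand-rolled integer key (31*state + context hash) for a JMap keyed by an ATNConfig comparator, but the grouping it computes is the same: configurations that agree on (state, context) pool their alternatives. A rough stand-alone sketch of that grouping, using stand-in types rather than the runtime's ATNConfig and BitSet:

package main

import "fmt"

// config is a stand-in for ATNConfig: only the fields the grouping cares about.
type config struct {
	state, alt, ctxHash int
}

// conflictingAltSubsets groups the alts of configs that share (state, context),
// which is the grouping PredictionModegetConflictingAltSubsets performs above.
func conflictingAltSubsets(configs []config) map[[2]int]map[int]bool {
	out := map[[2]int]map[int]bool{}
	for _, c := range configs {
		key := [2]int{c.state, c.ctxHash}
		if out[key] == nil {
			out[key] = map[int]bool{}
		}
		out[key][c.alt] = true
	}
	return out
}

func main() {
	cfgs := []config{{12, 1, 7}, {12, 2, 7}, {6, 2, 7}}
	for k, alts := range conflictingAltSubsets(cfgs) {
		fmt.Println(k, alts) // (12,7) pools alts {1,2}; (6,7) holds {2}
	}
}
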
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
similarity index 92%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
index 93efcf355d..bfe542d091 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -49,7 +49,7 @@ var tokenTypeMapCache = make(map[string]int)
var ruleIndexMapCache = make(map[string]int)
func (b *BaseRecognizer) checkVersion(toolVersion string) {
- runtimeVersion := "4.10.1"
+ runtimeVersion := "4.12.0"
if runtimeVersion != toolVersion {
fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
}
@@ -108,7 +108,6 @@ func (b *BaseRecognizer) SetState(v int) {
// Get a map from rule names to rule indexes.
//
// Used for XPath and tree pattern compilation.
-//
func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
panic("Method not defined!")
@@ -171,18 +170,18 @@ func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
}
// How should a token be displayed in an error message? The default
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a NewJava type.
+//
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a NewJava type.
//
// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
// implementations of {@link ANTLRErrorStrategy} may provide a similar
// feature when necessary. For example, see
// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
-//
func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
if t == nil {
return ""
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
similarity index 97%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
index 600cf8c062..210699ba23 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
similarity index 85%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
index 9ada430779..a702e99def 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -18,12 +18,12 @@ import (
//
type SemanticContext interface {
- comparable
+ Equals(other Collectable[SemanticContext]) bool
+ Hash() int
evaluate(parser Recognizer, outerContext RuleContext) bool
evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
- hash() int
String() string
}
@@ -78,7 +78,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
//The default {@link SemanticContext}, which is semantically equivalent to
//a predicate of the form {@code {true}?}.
-var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
+var SemanticContextNone = NewPredicate(-1, -1, false)
func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
return p
@@ -95,7 +95,7 @@ func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
}
-func (p *Predicate) equals(other interface{}) bool {
+func (p *Predicate) Equals(other Collectable[SemanticContext]) bool {
if p == other {
return true
} else if _, ok := other.(*Predicate); !ok {
@@ -107,7 +107,7 @@ func (p *Predicate) equals(other interface{}) bool {
}
}
-func (p *Predicate) hash() int {
+func (p *Predicate) Hash() int {
h := murmurInit(0)
h = murmurUpdate(h, p.ruleIndex)
h = murmurUpdate(h, p.predIndex)
@@ -151,17 +151,22 @@ func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
return p.precedence - other.precedence
}
-func (p *PrecedencePredicate) equals(other interface{}) bool {
- if p == other {
- return true
- } else if _, ok := other.(*PrecedencePredicate); !ok {
+func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool {
+
+ var op *PrecedencePredicate
+ var ok bool
+ if op, ok = other.(*PrecedencePredicate); !ok {
return false
- } else {
- return p.precedence == other.(*PrecedencePredicate).precedence
}
+
+ if p == op {
+ return true
+ }
+
+ return p.precedence == other.(*PrecedencePredicate).precedence
}
-func (p *PrecedencePredicate) hash() int {
+func (p *PrecedencePredicate) Hash() int {
h := uint32(1)
h = 31*h + uint32(p.precedence)
return int(h)
@@ -171,10 +176,10 @@ func (p *PrecedencePredicate) String() string {
return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
}
-func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate {
+func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate {
result := make([]*PrecedencePredicate, 0)
- set.Each(func(v interface{}) bool {
+ set.Each(func(v SemanticContext) bool {
if c2, ok := v.(*PrecedencePredicate); ok {
result = append(result, c2)
}
@@ -193,21 +198,21 @@ type AND struct {
func NewAND(a, b SemanticContext) *AND {
- operands := newArray2DHashSet(nil, nil)
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
- operands.Add(o)
+ operands.Put(o)
}
} else {
- operands.Add(a)
+ operands.Put(a)
}
if ba, ok := b.(*AND); ok {
for _, o := range ba.opnds {
- operands.Add(o)
+ operands.Put(o)
}
} else {
- operands.Add(b)
+ operands.Put(b)
}
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
if len(precedencePredicates) > 0 {
@@ -220,7 +225,7 @@ func NewAND(a, b SemanticContext) *AND {
}
}
- operands.Add(reduced)
+ operands.Put(reduced)
}
vs := operands.Values()
@@ -235,14 +240,15 @@ func NewAND(a, b SemanticContext) *AND {
return and
}
-func (a *AND) equals(other interface{}) bool {
+func (a *AND) Equals(other Collectable[SemanticContext]) bool {
if a == other {
return true
- } else if _, ok := other.(*AND); !ok {
+ }
+ if _, ok := other.(*AND); !ok {
return false
} else {
for i, v := range other.(*AND).opnds {
- if !a.opnds[i].equals(v) {
+ if !a.opnds[i].Equals(v) {
return false
}
}
@@ -250,13 +256,11 @@ func (a *AND) equals(other interface{}) bool {
}
}
-//
// {@inheritDoc}
//
//
// The evaluation of predicates by a context is short-circuiting, but
// unordered.
-//
func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
for i := 0; i < len(a.opnds); i++ {
if !a.opnds[i].evaluate(parser, outerContext) {
@@ -304,18 +308,18 @@ func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) Semant
return result
}
-func (a *AND) hash() int {
+func (a *AND) Hash() int {
h := murmurInit(37) // Init with a value different from OR
for _, op := range a.opnds {
- h = murmurUpdate(h, op.hash())
+ h = murmurUpdate(h, op.Hash())
}
return murmurFinish(h, len(a.opnds))
}
-func (a *OR) hash() int {
+func (a *OR) Hash() int {
h := murmurInit(41) // Init with a value different from AND
for _, op := range a.opnds {
- h = murmurUpdate(h, op.hash())
+ h = murmurUpdate(h, op.Hash())
}
return murmurFinish(h, len(a.opnds))
}
@@ -345,21 +349,21 @@ type OR struct {
func NewOR(a, b SemanticContext) *OR {
- operands := newArray2DHashSet(nil, nil)
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
- operands.Add(o)
+ operands.Put(o)
}
} else {
- operands.Add(a)
+ operands.Put(a)
}
if ba, ok := b.(*OR); ok {
for _, o := range ba.opnds {
- operands.Add(o)
+ operands.Put(o)
}
} else {
- operands.Add(b)
+ operands.Put(b)
}
precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
if len(precedencePredicates) > 0 {
@@ -372,7 +376,7 @@ func NewOR(a, b SemanticContext) *OR {
}
}
- operands.Add(reduced)
+ operands.Put(reduced)
}
vs := operands.Values()
@@ -388,14 +392,14 @@ func NewOR(a, b SemanticContext) *OR {
return o
}
-func (o *OR) equals(other interface{}) bool {
+func (o *OR) Equals(other Collectable[SemanticContext]) bool {
if o == other {
return true
} else if _, ok := other.(*OR); !ok {
return false
} else {
for i, v := range other.(*OR).opnds {
- if !o.opnds[i].equals(v) {
+ if !o.opnds[i].Equals(v) {
return false
}
}
@@ -406,7 +410,6 @@ func (o *OR) equals(other interface{}) bool {
//
// The evaluation of predicates by o context is short-circuiting, but
// unordered.
-//
func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
for i := 0; i < len(o.opnds); i++ {
if o.opnds[i].evaluate(parser, outerContext) {
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
similarity index 98%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
index 2d8e99095d..f73b06bc6a 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
@@ -158,7 +158,6 @@ func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start
// {@link Token//GetInputStream}.
//
// @param oldToken The token to copy.
-//
func (c *CommonToken) clone() *CommonToken {
t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
t.tokenIndex = c.GetTokenIndex()
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
similarity index 85%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
index e023978fef..a3f36eaa67 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
similarity index 87%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
index df92c81478..1527d43f60 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
similarity index 58%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
rename to vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
index 96a03f02aa..b3e38af344 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
@@ -1,15 +1,15 @@
-// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
// Use of this file is governed by the BSD 3-clause license that
// can be found in the LICENSE.txt file in the project root.
+
package antlr
import (
-"bytes"
-"fmt"
+ "bytes"
+ "fmt"
)
-
-//
+//
// Useful for rewriting out a buffered input token stream after doing some
// augmentation or other manipulations on it.
@@ -85,12 +85,10 @@ import (
// If you don't use named rewrite streams, a "default" stream is used as the
// first example shows.
-
-
-const(
+const (
Default_Program_Name = "default"
- Program_Init_Size = 100
- Min_Token_Index = 0
+ Program_Init_Size = 100
+ Min_Token_Index = 0
)
// Define the rewrite operation hierarchy
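
The doc comment earlier in this file describes queuing rewrites against a named program (or the "default" one) and only materializing them when the text is requested. A hedged usage sketch built on the exported methods in the hunks below; the token stream is assumed to come from a generated lexer via antlr.NewCommonTokenStream, which is not shown, and the token indices are hypothetical.

package example

import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

// rewriteExample queues a few operations against the default program and then
// renders the rewritten text. It assumes the stream holds at least seven tokens.
func rewriteExample(ts antlr.TokenStream) string {
	tsr := antlr.NewTokenStreamRewriter(ts)
	tsr.InsertBeforeDefault(0, "// generated\n") // queue an insert before token 0
	tsr.ReplaceDefaultPos(3, "renamed")          // queue a single-token replace
	tsr.DeleteDefaultPos(5)                      // queue a delete (replace with "")
	return tsr.GetTextDefault()                  // executes the queued program
}
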
@@ -98,13 +96,13 @@ const(
type RewriteOperation interface {
// Execute the rewrite operation by possibly adding to the buffer.
// Return the index of the next token to operate on.
- Execute(buffer *bytes.Buffer) int
- String() string
- GetInstructionIndex() int
- GetIndex() int
- GetText() string
- GetOpName() string
- GetTokens() TokenStream
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
SetInstructionIndex(val int)
SetIndex(int)
SetText(string)
@@ -114,63 +112,62 @@ type RewriteOperation interface {
type BaseRewriteOperation struct {
//Current index of rewrites list
- instruction_index int
+ instruction_index int
//Token buffer index
- index int
+ index int
//Substitution text
- text string
+ text string
//Actual operation name
- op_name string
+ op_name string
//Pointer to token steam
- tokens TokenStream
+ tokens TokenStream
}
-func (op *BaseRewriteOperation)GetInstructionIndex() int{
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
return op.instruction_index
}
-func (op *BaseRewriteOperation)GetIndex() int{
+func (op *BaseRewriteOperation) GetIndex() int {
return op.index
}
-func (op *BaseRewriteOperation)GetText() string{
+func (op *BaseRewriteOperation) GetText() string {
return op.text
}
-func (op *BaseRewriteOperation)GetOpName() string{
+func (op *BaseRewriteOperation) GetOpName() string {
return op.op_name
}
-func (op *BaseRewriteOperation)GetTokens() TokenStream{
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
return op.tokens
}
-func (op *BaseRewriteOperation)SetInstructionIndex(val int){
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
op.instruction_index = val
}
-func (op *BaseRewriteOperation)SetIndex(val int) {
+func (op *BaseRewriteOperation) SetIndex(val int) {
op.index = val
}
-func (op *BaseRewriteOperation)SetText(val string){
+func (op *BaseRewriteOperation) SetText(val string) {
op.text = val
}
-func (op *BaseRewriteOperation)SetOpName(val string){
+func (op *BaseRewriteOperation) SetOpName(val string) {
op.op_name = val
}
-func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
op.tokens = val
}
-
-func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
return op.index
}
-func (op *BaseRewriteOperation) String() string {
+func (op *BaseRewriteOperation) String() string {
return fmt.Sprintf("<%s@%d:\"%s\">",
op.op_name,
op.tokens.Get(op.GetIndex()),
@@ -179,26 +176,25 @@ func (op *BaseRewriteOperation) String() string {
}
-
type InsertBeforeOp struct {
BaseRewriteOperation
}
-func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
- return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
- index:index,
- text:text,
- op_name:"InsertBeforeOp",
- tokens:stream,
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+ return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index,
+ text: text,
+ op_name: "InsertBeforeOp",
+ tokens: stream,
}}
}
-func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
buffer.WriteString(op.tokens.Get(op.index).GetText())
}
- return op.index+1
+ return op.index + 1
}
func (op *InsertBeforeOp) String() string {
@@ -213,20 +209,20 @@ type InsertAfterOp struct {
BaseRewriteOperation
}
-func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
- return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
- index:index+1,
- text:text,
- tokens:stream,
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+ return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index + 1,
+ text: text,
+ tokens: stream,
}}
}
func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
buffer.WriteString(op.tokens.Get(op.index).GetText())
}
- return op.index+1
+ return op.index + 1
}
func (op *InsertAfterOp) String() string {
@@ -235,28 +231,28 @@ func (op *InsertAfterOp) String() string {
// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
// instructions.
-type ReplaceOp struct{
+type ReplaceOp struct {
BaseRewriteOperation
LastIndex int
}
-func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
return &ReplaceOp{
- BaseRewriteOperation:BaseRewriteOperation{
- index:from,
- text:text,
- op_name:"ReplaceOp",
- tokens:stream,
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: from,
+ text: text,
+ op_name: "ReplaceOp",
+ tokens: stream,
},
- LastIndex:to,
+ LastIndex: to,
}
}
-func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
- if op.text != ""{
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+ if op.text != "" {
buffer.WriteString(op.text)
}
- return op.LastIndex +1
+ return op.LastIndex + 1
}
func (op *ReplaceOp) String() string {
@@ -268,54 +264,54 @@ func (op *ReplaceOp) String() string {
op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
}
-
type TokenStreamRewriter struct {
//Our source stream
- tokens TokenStream
+ tokens TokenStream
// You may have multiple, named streams of rewrite operations.
// I'm calling these things "programs."
// Maps String (name) → rewrite (List)
- programs map[string][]RewriteOperation
- last_rewrite_token_indexes map[string]int
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
}
-func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
return &TokenStreamRewriter{
- tokens: tokens,
- programs: map[string][]RewriteOperation{
- Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
},
- last_rewrite_token_indexes: map[string]int{},
+ last_rewrite_token_indexes: map[string]int{},
}
}
-func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
return tsr.tokens
}
-// Rollback the instruction stream for a program so that
-// the indicated instruction (via instructionIndex) is no
-// longer in the stream. UNTESTED!
-func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
- is, ok := tsr.programs[program_name]
- if ok{
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
+ is, ok := tsr.programs[program_name]
+ if ok {
tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
}
}
-func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
tsr.Rollback(Default_Program_Name, instruction_index)
}
-//Reset the program so that no instructions exist
-func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
+
+// Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
}
-func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
tsr.DeleteProgram(Default_Program_Name)
}
-func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
// to insert after, just insert before next index (even if past end)
var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
@@ -323,31 +319,31 @@ func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text
tsr.AddToProgram(program_name, op)
}
-func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
tsr.InsertAfter(Default_Program_Name, index, text)
}
-func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
}
-func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
+func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
rewrites := tsr.GetProgram(program_name)
op.SetInstructionIndex(len(rewrites))
tsr.AddToProgram(program_name, op)
}
-func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
tsr.InsertBefore(Default_Program_Name, index, text)
}
-func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
}
-func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
- if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
from, to, tsr.tokens.Size()))
}
@@ -357,207 +353,216 @@ func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text
tsr.AddToProgram(program_name, op)
}
-func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
tsr.Replace(Default_Program_Name, from, to, text)
}
-func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
tsr.ReplaceDefault(index, index, text)
}
-func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
+func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
}
-func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
tsr.ReplaceToken(Default_Program_Name, from, to, text)
}
-func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
tsr.ReplaceTokenDefault(index, index, text)
}
-func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
- tsr.Replace(program_name, from, to, "" )
+func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
+ tsr.Replace(program_name, from, to, "")
}
-func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
tsr.Delete(Default_Program_Name, from, to)
}
-func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
- tsr.DeleteDefault(index,index)
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+ tsr.DeleteDefault(index, index)
}
-func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
+func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
tsr.ReplaceToken(program_name, from, to, "")
}
-func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
tsr.DeleteToken(Default_Program_Name, from, to)
}
-func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
i, ok := tsr.last_rewrite_token_indexes[program_name]
- if !ok{
+ if !ok {
return -1
}
return i
}
-func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
}
-func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
tsr.last_rewrite_token_indexes[program_name] = i
}
-func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
is := make([]RewriteOperation, 0, Program_Init_Size)
tsr.programs[name] = is
return is
}
-func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
is := tsr.GetProgram(name)
is = append(is, op)
tsr.programs[name] = is
}
-func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
is, ok := tsr.programs[name]
- if !ok{
+ if !ok {
is = tsr.InitializeProgram(name)
}
return is
}
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter)GetTextDefault() string{
+
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
return tsr.GetText(
Default_Program_Name,
NewInterval(0, tsr.tokens.Size()-1))
}
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
+
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
rewrites := tsr.programs[program_name]
start := interval.Start
- stop := interval.Stop
+ stop := interval.Stop
// ensure start/end are in range
stop = min(stop, tsr.tokens.Size()-1)
- start = max(start,0)
- if rewrites == nil || len(rewrites) == 0{
+ start = max(start, 0)
+ if rewrites == nil || len(rewrites) == 0 {
return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
}
buf := bytes.Buffer{}
// First, optimize instruction stream
indexToOp := reduceToSingleOperationPerIndex(rewrites)
// Walk buffer, executing instructions and emitting tokens
- for i:=start; i<=stop && i= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
+ for _, op := range indexToOp {
+ if op.GetIndex() >= tsr.tokens.Size()-1 {
+ buf.WriteString(op.GetText())
+ }
}
}
return buf.String()
}
-// We need to combine operations and report invalid operations (like
-// overlapping replaces that are not completed nested). Inserts to
-// same index need to be combined etc... Here are the cases:
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completed nested). Inserts to
+// same index need to be combined etc... Here are the cases:
//
-// I.i.u I.j.v leave alone, nonoverlapping
-// I.i.u I.i.v combine: Iivu
+// I.i.u I.j.v leave alone, nonoverlapping
+// I.i.u I.i.v combine: Iivu
//
-// R.i-j.u R.x-y.v | i-j in x-y delete first R
-// R.i-j.u R.i-j.v delete first R
-// R.i-j.u R.x-y.v | x-y in i-j ERROR
-// R.i-j.u R.x-y.v | boundaries overlap ERROR
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
//
-// Delete special case of replace (text==null):
-// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
//
-// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
-// we're not deleting i)
-// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
-// R.x-y.v I.i.u | i in x-y ERROR
-// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
-// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
//
-// I.i.u = insert u before op @ index i
-// R.x-y.u = replace x-y indexed tokens with u
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
//
-// First we need to examine replaces. For any replace op:
+// First we need to examine replaces. For any replace op:
//
-// 1. wipe out any insertions before op within that range.
-// 2. Drop any replace op before that is contained completely within
-// that range.
-// 3. Throw exception upon boundary overlap with any previous replace.
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
//
-// Then we can deal with inserts:
+// Then we can deal with inserts:
//
-// 1. for any inserts to same index, combine even if not adjacent.
-// 2. for any prior replace with same left boundary, combine this
-// insert with replace and delete this replace.
-// 3. throw exception if index in same range as previous replace
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
//
-// Don't actually delete; make op null in list. Easier to walk list.
-// Later we can throw as we add to index → op map.
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
//
-// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-// inserted stuff would be before the replace range. But, if you
-// add tokens in front of a method body '{' and then delete the method
-// body, I think the stuff before the '{' you added should disappear too.
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
//
-// Return a map from token index to operation.
-//
-func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
+// Return a map from token index to operation.
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
// WALK REPLACES
- for i:=0; i < len(rewrites); i++{
+ for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
- if op == nil{continue}
+ if op == nil {
+ continue
+ }
rop, ok := op.(*ReplaceOp)
- if !ok{continue}
+ if !ok {
+ continue
+ }
// Wipe prior inserts within range
- for j:=0; j rop.index && iop.index <=rop.LastIndex{
+ } else if iop.index > rop.index && iop.index <= rop.LastIndex {
// delete insert as it's a no-op.
rewrites[iop.instruction_index] = nil
}
}
}
// Drop any prior replaces contained within
- for j:=0; j=rop.index && prevop.LastIndex <= rop.LastIndex{
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok {
+ if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
// delete replace as it's a no-op.
rewrites[prevop.instruction_index] = nil
continue
@@ -566,61 +571,67 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
// Delete special case of replace (text==null):
// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
- if prevop.text == "" && rop.text == "" && !disjoint{
+ if prevop.text == "" && rop.text == "" && !disjoint {
rewrites[prevop.instruction_index] = nil
rop.index = min(prevop.index, rop.index)
rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
- }else if !disjoint{
+ } else if !disjoint {
panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
}
}
}
}
// WALK INSERTS
- for i:=0; i < len(rewrites); i++ {
+ for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
- if op == nil{continue}
+ if op == nil {
+ continue
+ }
//hack to replicate inheritance in composition
_, iok := rewrites[i].(*InsertBeforeOp)
_, aok := rewrites[i].(*InsertAfterOp)
- if !iok && !aok{continue}
+ if !iok && !aok {
+ continue
+ }
iop := rewrites[i]
// combine current insert with prior if any at same index
// deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
- for j:=0; j= rop.index && iop.GetIndex() <= rop.LastIndex{
- panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
+ panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
}
}
}
}
m := map[int]RewriteOperation{}
- for i:=0; i < len(rewrites); i++{
+ for i := 0; i < len(rewrites); i++ {
op := rewrites[i]
- if op == nil {continue}
- if _, ok := m[op.GetIndex()]; ok{
+ if op == nil {
+ continue
+ }
+ if _, ok := m[op.GetIndex()]; ok {
panic("should only be one op per index")
}
m[op.GetIndex()] = op
@@ -628,22 +639,21 @@ func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]Rewrit
return m
}
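
For example, under the rules spelled out in the comment above, two inserts queued at the same index fold into one operation (later text first, per the I.i.u / I.i.v case) and a replace drops inserts queued strictly inside its range. A hedged illustration in terms of the public rewriter API, again with hypothetical token indices:

package example

import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

// combineExample queues overlapping operations; reduceToSingleOperationPerIndex
// runs when GetTextDefault walks the buffer. The two inserts at index 2 become
// a single op with text "secondfirst", and the insert at index 5 is dropped
// because it falls inside the replaced range 4..6.
func combineExample(ts antlr.TokenStream) string {
	tsr := antlr.NewTokenStreamRewriter(ts)
	tsr.InsertBeforeDefault(2, "first")
	tsr.InsertBeforeDefault(2, "second")
	tsr.InsertBeforeDefault(5, "dropped")
	tsr.ReplaceDefault(4, 6, "X")
	return tsr.GetTextDefault()
}
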
-
/*
Quick fixing Go lack of overloads
- */
+*/
-func max(a,b int)int{
- if a>b{
+func max(a, b int) int {
+ if a > b {
return a
- }else {
+ } else {
return b
}
}
-func min(a,b int)int{
- if a as.threshold {
as.expand()
}
@@ -98,7 +96,7 @@ func (as *array2DHashSet) expand() {
b := as.getBuckets(o)
bucketLength := newBucketLengths[b]
- var newBucket []interface{}
+ var newBucket []Collectable[any]
if bucketLength == 0 {
// new bucket
newBucket = as.createBucket(as.initialBucketCapacity)
@@ -107,7 +105,7 @@ func (as *array2DHashSet) expand() {
newBucket = newTable[b]
if bucketLength == len(newBucket) {
// expand
- newBucketCopy := make([]interface{}, len(newBucket)<<1)
+ newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
copy(newBucketCopy[:bucketLength], newBucket)
newBucket = newBucketCopy
newTable[b] = newBucket
@@ -124,7 +122,7 @@ func (as *array2DHashSet) Len() int {
return as.n
}
-func (as *array2DHashSet) Get(o interface{}) interface{} {
+func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
if o == nil {
return nil
}
@@ -147,7 +145,7 @@ func (as *array2DHashSet) Get(o interface{}) interface{} {
return nil
}
-func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
+func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
b := as.getBuckets(o)
bucket := as.buckets[b]
@@ -178,7 +176,7 @@ func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
// full bucket, expand and add to end
oldLength := len(bucket)
- bucketCopy := make([]interface{}, oldLength<<1)
+ bucketCopy := make([]Collectable[any], oldLength<<1)
copy(bucketCopy[:oldLength], bucket)
bucket = bucketCopy
as.buckets[b] = bucket
@@ -187,22 +185,22 @@ func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
return o
}
-func (as *array2DHashSet) getBuckets(value interface{}) int {
+func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
hash := as.hashcodeFunction(value)
return hash & (len(as.buckets) - 1)
}
-func (as *array2DHashSet) createBuckets(cap int) [][]interface{} {
- return make([][]interface{}, cap)
+func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
+ return make([][]Collectable[any], cap)
}
-func (as *array2DHashSet) createBucket(cap int) []interface{} {
- return make([]interface{}, cap)
+func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
+ return make([]Collectable[any], cap)
}
func newArray2DHashSetWithCap(
hashcodeFunction func(interface{}) int,
- equalsFunction func(interface{}, interface{}) bool,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
initCap int,
initBucketCap int,
) *array2DHashSet {
@@ -231,7 +229,7 @@ func newArray2DHashSetWithCap(
func newArray2DHashSet(
hashcodeFunction func(interface{}) int,
- equalsFunction func(interface{}, interface{}) bool,
+ equalsFunction func(Collectable[any], Collectable[any]) bool,
) *array2DHashSet {
return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
}
diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go
index d889edb2c8..fa010c376b 100644
--- a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go
+++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go
@@ -47,18 +47,28 @@ const (
// returned by NodeGetInfo to ensure that a given volume is
// accessible from a given node when scheduling workloads.
PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2
+ // GROUP_CONTROLLER_SERVICE indicates that the Plugin provides
+ // RPCs for operating on groups of volumes. Plugins MAY provide
+ // this capability.
+ // The presence of this capability determines whether the CO will
+ // attempt to invoke the REQUIRED GroupController service RPCs, as
+ // well as specific RPCs as indicated by
+ // GroupControllerGetCapabilities.
+ PluginCapability_Service_GROUP_CONTROLLER_SERVICE PluginCapability_Service_Type = 3
)
var PluginCapability_Service_Type_name = map[int32]string{
0: "UNKNOWN",
1: "CONTROLLER_SERVICE",
2: "VOLUME_ACCESSIBILITY_CONSTRAINTS",
+ 3: "GROUP_CONTROLLER_SERVICE",
}
var PluginCapability_Service_Type_value = map[string]int32{
"UNKNOWN": 0,
"CONTROLLER_SERVICE": 1,
"VOLUME_ACCESSIBILITY_CONSTRAINTS": 2,
+ "GROUP_CONTROLLER_SERVICE": 3,
}
func (x PluginCapability_Service_Type) String() string {
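
The new GROUP_CONTROLLER_SERVICE value above is what a CO is expected to check before invoking any GroupController RPCs. A hedged CO-side sketch using the generated client types (gRPC connection setup and detailed error handling elided):

package example

import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// supportsGroupController reports whether the plugin advertises
// GROUP_CONTROLLER_SERVICE; a CO would gate GroupController RPC calls on this.
func supportsGroupController(ctx context.Context, id csi.IdentityClient) (bool, error) {
	resp, err := id.GetPluginCapabilities(ctx, &csi.GetPluginCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, c := range resp.GetCapabilities() {
		if c.GetService().GetType() == csi.PluginCapability_Service_GROUP_CONTROLLER_SERVICE {
			return true, nil
		}
	}
	return false, nil
}
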
@@ -85,17 +95,20 @@ const (
// expansion of node-published volume via NodeExpandVolume.
//
// Example 1: Given a shared filesystem volume (e.g. GlusterFs),
- // the Plugin may set the ONLINE volume expansion capability and
- // implement ControllerExpandVolume but not NodeExpandVolume.
+ //
+ // the Plugin may set the ONLINE volume expansion capability and
+ // implement ControllerExpandVolume but not NodeExpandVolume.
//
// Example 2: Given a block storage volume type (e.g. EBS), the
- // Plugin may set the ONLINE volume expansion capability and
- // implement both ControllerExpandVolume and NodeExpandVolume.
+ //
+ // Plugin may set the ONLINE volume expansion capability and
+ // implement both ControllerExpandVolume and NodeExpandVolume.
//
// Example 3: Given a Plugin that supports volume expansion only
- // upon a node, the Plugin may set the ONLINE volume
- // expansion capability and implement NodeExpandVolume but not
- // ControllerExpandVolume.
+ //
+ // upon a node, the Plugin may set the ONLINE volume
+ // expansion capability and implement NodeExpandVolume but not
+ // ControllerExpandVolume.
PluginCapability_VolumeExpansion_ONLINE PluginCapability_VolumeExpansion_Type = 1
// OFFLINE indicates that volumes currently published and
// available on a node SHALL NOT be expanded via
@@ -105,10 +118,11 @@ const (
// the EXPAND_VOLUME node capability.
//
// Example 1: Given a block storage volume type (e.g. Azure Disk)
- // that does not support expansion of "node-attached" (i.e.
- // controller-published) volumes, the Plugin may indicate
- // OFFLINE volume expansion support and implement both
- // ControllerExpandVolume and NodeExpandVolume.
+ //
+ // that does not support expansion of "node-attached" (i.e.
+ // controller-published) volumes, the Plugin may indicate
+ // OFFLINE volume expansion support and implement both
+ // ControllerExpandVolume and NodeExpandVolume.
PluginCapability_VolumeExpansion_OFFLINE PluginCapability_VolumeExpansion_Type = 2
)
@@ -385,6 +399,34 @@ func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_9cdb00adce470e01, []int{55, 0, 0}
}
+type GroupControllerServiceCapability_RPC_Type int32
+
+const (
+ GroupControllerServiceCapability_RPC_UNKNOWN GroupControllerServiceCapability_RPC_Type = 0
+ // Indicates that the group controller plugin supports
+ // creating, deleting, and getting details of a volume
+ // group snapshot.
+ GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT GroupControllerServiceCapability_RPC_Type = 1
+)
+
+var GroupControllerServiceCapability_RPC_Type_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT",
+}
+
+var GroupControllerServiceCapability_RPC_Type_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT": 1,
+}
+
+func (x GroupControllerServiceCapability_RPC_Type) String() string {
+ return proto.EnumName(GroupControllerServiceCapability_RPC_Type_name, int32(x))
+}
+
+func (GroupControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{62, 0, 0}
+}
+
type GetPluginInfoRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -555,6 +597,7 @@ func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability {
// Specifies a capability of the plugin.
type PluginCapability struct {
// Types that are valid to be assigned to Type:
+ //
// *PluginCapability_Service_
// *PluginCapability_VolumeExpansion_
Type isPluginCapability_Type `protobuf_oneof:"type"`
@@ -748,16 +791,16 @@ type ProbeResponse struct {
// and it is important for a CO to distinguish between the following
// cases:
//
- // 1) The plugin is in an unhealthy state and MAY need restarting. In
- // this case a gRPC error code SHALL be returned.
- // 2) The plugin is still initializing, but is otherwise perfectly
- // healthy. In this case a successful response SHALL be returned
- // with a readiness value of `false`. Calls to the plugin's
- // Controller and/or Node services MAY fail due to an incomplete
- // initialization state.
- // 3) The plugin has finished initializing and is ready to service
- // calls to its Controller and/or Node services. A successful
- // response is returned with a readiness value of `true`.
+ // 1. The plugin is in an unhealthy state and MAY need restarting. In
+ // this case a gRPC error code SHALL be returned.
+ // 2. The plugin is still initializing, but is otherwise perfectly
+ // healthy. In this case a successful response SHALL be returned
+ // with a readiness value of `false`. Calls to the plugin's
+ // Controller and/or Node services MAY fail due to an incomplete
+ // initialization state.
+ // 3. The plugin has finished initializing and is ready to service
+ // calls to its Controller and/or Node services. A successful
+ // response is returned with a readiness value of `true`.
//
// This field is OPTIONAL. If not present, the caller SHALL assume
// that the plugin is in a ready state and is accepting calls to its
@@ -804,26 +847,27 @@ func (m *ProbeResponse) GetReady() *wrappers.BoolValue {
type CreateVolumeRequest struct {
// The suggested name for the storage space. This field is REQUIRED.
// It serves two purposes:
- // 1) Idempotency - This name is generated by the CO to achieve
- // idempotency. The Plugin SHOULD ensure that multiple
- // `CreateVolume` calls for the same name do not result in more
- // than one piece of storage provisioned corresponding to that
- // name. If a Plugin is unable to enforce idempotency, the CO's
- // error recovery logic could result in multiple (unused) volumes
- // being provisioned.
- // In the case of error, the CO MUST handle the gRPC error codes
- // per the recovery behavior defined in the "CreateVolume Errors"
- // section below.
- // The CO is responsible for cleaning up volumes it provisioned
- // that it no longer needs. If the CO is uncertain whether a volume
- // was provisioned or not when a `CreateVolume` call fails, the CO
- // MAY call `CreateVolume` again, with the same name, to ensure the
- // volume exists and to retrieve the volume's `volume_id` (unless
- // otherwise prohibited by "CreateVolume Errors").
- // 2) Suggested name - Some storage systems allow callers to specify
- // an identifier by which to refer to the newly provisioned
- // storage. If a storage system supports this, it can optionally
- // use this name as the identifier for the new volume.
+ // 1. Idempotency - This name is generated by the CO to achieve
+ // idempotency. The Plugin SHOULD ensure that multiple
+ // `CreateVolume` calls for the same name do not result in more
+ // than one piece of storage provisioned corresponding to that
+ // name. If a Plugin is unable to enforce idempotency, the CO's
+ // error recovery logic could result in multiple (unused) volumes
+ // being provisioned.
+ // In the case of error, the CO MUST handle the gRPC error codes
+ // per the recovery behavior defined in the "CreateVolume Errors"
+ // section below.
+ // The CO is responsible for cleaning up volumes it provisioned
+ // that it no longer needs. If the CO is uncertain whether a volume
+ // was provisioned or not when a `CreateVolume` call fails, the CO
+ // MAY call `CreateVolume` again, with the same name, to ensure the
+ // volume exists and to retrieve the volume's `volume_id` (unless
+ // otherwise prohibited by "CreateVolume Errors").
+ // 2. Suggested name - Some storage systems allow callers to specify
+ // an identifier by which to refer to the newly provisioned
+ // storage. If a storage system supports this, it can optionally
+ // use this name as the identifier for the new volume.
+ //
// Any Unicode string that conforms to the length limit is allowed
// except those containing the following banned characters:
// U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
@@ -957,6 +1001,7 @@ func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequiremen
// type fields MUST be specified.
type VolumeContentSource struct {
// Types that are valid to be assigned to Type:
+ //
// *VolumeContentSource_Snapshot
// *VolumeContentSource_Volume
Type isVolumeContentSource_Type `protobuf_oneof:"type"`
@@ -1168,6 +1213,7 @@ type VolumeCapability struct {
// following fields MUST be specified.
//
// Types that are valid to be assigned to AccessType:
+ //
// *VolumeCapability_Block
// *VolumeCapability_Mount
AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"`
@@ -1509,14 +1555,18 @@ type Volume struct {
// node.
//
// Example 1:
- // accessible_topology = {"region": "R1", "zone": "Z2"}
+ //
+ // accessible_topology = {"region": "R1", "zone": "Z2"}
+ //
// Indicates a volume accessible only from the "region" "R1" and the
// "zone" "Z2".
//
// Example 2:
- // accessible_topology =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"}
+ //
+ // accessible_topology =
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ //
// Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3"
// in the "region" "R1".
AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
@@ -1595,21 +1645,27 @@ type TopologyRequirement struct {
// accessible from at least one of the requisite topologies.
//
// Given
- // x = number of topologies provisioned volume is accessible from
- // n = number of requisite topologies
+ //
+ // x = number of topologies provisioned volume is accessible from
+ // n = number of requisite topologies
+ //
// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
// If x==n, then the SP MUST make the provisioned volume available to
// all topologies from the list of requisite topologies. If it is
// unable to do so, the SP MUST fail the CreateVolume call.
// For example, if a volume should be accessible from a single zone,
// and requisite =
- // {"region": "R1", "zone": "Z2"}
+ //
+ // {"region": "R1", "zone": "Z2"}
+ //
// then the provisioned volume MUST be accessible from the "region"
// "R1" and the "zone" "Z2".
// Similarly, if a volume should be accessible from two zones, and
// requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"}
+ //
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ //
// then the provisioned volume MUST be accessible from the "region"
// "R1" and both "zone" "Z2" and "zone" "Z3".
//
@@ -1618,18 +1674,23 @@ type TopologyRequirement struct {
// the CreateVolume call.
// For example, if a volume should be accessible from a single zone,
// and requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"}
+ //
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ //
// then the SP may choose to make the provisioned volume available in
// either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
// Similarly, if a volume should be accessible from two zones, and
// requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"},
- // {"region": "R1", "zone": "Z4"}
+ //
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"}
+ //
// then the provisioned volume MUST be accessible from any combination
// of two unique topologies: e.g. "R1/Z2" and "R1/Z3", or "R1/Z2" and
-	//      "R1/Z4", or "R1/Z3" and "R1/Z4".
+	// "R1/Z4", or "R1/Z3" and "R1/Z4".
//
// If x>n, then the SP MUST make the provisioned volume available from
// all topologies from the list of requisite topologies and MAY choose
@@ -1638,7 +1699,9 @@ type TopologyRequirement struct {
// CreateVolume call.
// For example, if a volume should be accessible from two zones, and
// requisite =
- // {"region": "R1", "zone": "Z2"}
+ //
+ // {"region": "R1", "zone": "Z2"}
+ //
// then the provisioned volume MUST be accessible from the "region"
// "R1" and the "zone" "Z2" and the SP may select the second zone
// independently, e.g. "R1/Z4".
@@ -1667,10 +1730,14 @@ type TopologyRequirement struct {
// Example 1:
// Given a volume should be accessible from a single zone, and
// requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"}
+ //
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"}
+ //
// preferred =
- // {"region": "R1", "zone": "Z3"}
+ //
+ // {"region": "R1", "zone": "Z3"}
+ //
// then the SP SHOULD first attempt to make the provisioned volume
// available from "zone" "Z3" in the "region" "R1" and fall back to
// "zone" "Z2" in the "region" "R1" if that is not possible.
@@ -1678,13 +1745,17 @@ type TopologyRequirement struct {
// Example 2:
// Given a volume should be accessible from a single zone, and
// requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"},
- // {"region": "R1", "zone": "Z4"},
- // {"region": "R1", "zone": "Z5"}
+ //
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ //
// preferred =
- // {"region": "R1", "zone": "Z4"},
- // {"region": "R1", "zone": "Z2"}
+ //
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z2"}
+ //
// then the SP SHOULD first attempt to make the provisioned volume
// accessible from "zone" "Z4" in the "region" "R1" and fall back to
// "zone" "Z2" in the "region" "R1" if that is not possible. If that
@@ -1697,13 +1768,17 @@ type TopologyRequirement struct {
// the volume is accessible from two zones, aka synchronously
// replicated), and
// requisite =
- // {"region": "R1", "zone": "Z2"},
- // {"region": "R1", "zone": "Z3"},
- // {"region": "R1", "zone": "Z4"},
- // {"region": "R1", "zone": "Z5"}
+ //
+ // {"region": "R1", "zone": "Z2"},
+ // {"region": "R1", "zone": "Z3"},
+ // {"region": "R1", "zone": "Z4"},
+ // {"region": "R1", "zone": "Z5"}
+ //
// preferred =
- // {"region": "R1", "zone": "Z5"},
- // {"region": "R1", "zone": "Z3"}
+ //
+ // {"region": "R1", "zone": "Z5"},
+ // {"region": "R1", "zone": "Z3"}
+ //
// then the SP SHOULD first attempt to make the provisioned volume
// accessible from the combination of the two "zones" "Z5" and "Z3" in
// the "region" "R1". If that's not possible, it should fall back to
@@ -2972,6 +3047,7 @@ func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServi
// Specifies a capability of the controller service.
type ControllerServiceCapability struct {
// Types that are valid to be assigned to Type:
+ //
// *ControllerServiceCapability_Rpc
Type isControllerServiceCapability_Type `protobuf_oneof:"type"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@@ -3093,12 +3169,12 @@ type CreateSnapshotRequest struct {
// This field is OPTIONAL. The Plugin is responsible for parsing and
// validating these parameters. COs will treat these as opaque.
// Use cases for opaque parameters:
- // - Specify a policy to automatically clean up the snapshot.
- // - Specify an expiration date for the snapshot.
- // - Specify whether the snapshot is readonly or read/write.
- // - Specify if the snapshot should be replicated to some place.
- // - Specify primary or secondary for replication systems that
- // support snapshotting only on primary.
+ // - Specify a policy to automatically clean up the snapshot.
+ // - Specify an expiration date for the snapshot.
+ // - Specify whether the snapshot is readonly or read/write.
+ // - Specify if the snapshot should be replicated to some place.
+ // - Specify primary or secondary for replication systems that
+ // support snapshotting only on primary.
Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -3230,7 +3306,21 @@ type Snapshot struct {
// Indicates if a snapshot is ready to use as a
// `volume_content_source` in a `CreateVolumeRequest`. The default
// value is false. This field is REQUIRED.
- ReadyToUse bool `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"`
+ ReadyToUse bool `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"`
+ // The ID of the volume group snapshot that this snapshot is part of.
+ // It uniquely identifies the group snapshot on the storage system.
+ // This field is OPTIONAL.
+ // If this snapshot is a member of a volume group snapshot, and it
+ // MUST NOT be deleted as a stand alone snapshot, then the SP
+ // MUST provide the ID of the volume group snapshot in this field.
+ // If provided, CO MUST use this field in subsequent volume group
+ // snapshot operations to indicate that this snapshot is part of the
+ // specified group snapshot.
+ // If not provided, CO SHALL treat the snapshot as independent,
+ // and SP SHALL allow it to be deleted separately.
+ // If this message is inside a VolumeGroupSnapshot message, the value
+ // MUST be the same as the group_snapshot_id in that message.
+ GroupSnapshotId string `protobuf:"bytes,6,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -3296,6 +3386,13 @@ func (m *Snapshot) GetReadyToUse() bool {
return false
}
+func (m *Snapshot) GetGroupSnapshotId() string {
+ if m != nil {
+ return m.GroupSnapshotId
+ }
+ return ""
+}
+
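(Editorial sketch, not part of this vendored file: how a CO-side caller might honor the group_snapshot_id contract described above. It assumes the usual import path for this generated package, github.com/container-storage-interface/spec/lib/go/csi.)

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// mustUseGroupRPCs reports whether a snapshot belongs to a volume group
// snapshot and therefore should be handled through the group snapshot
// RPCs rather than deleted as a stand-alone snapshot.
func mustUseGroupRPCs(s *csi.Snapshot) bool {
	// An empty group_snapshot_id means the snapshot is independent and
	// may be deleted with a regular DeleteSnapshot call.
	return s.GetGroupSnapshotId() != ""
}

func main() {
	s := &csi.Snapshot{SnapshotId: "snap-1", GroupSnapshotId: "group-1"}
	fmt.Println(mustUseGroupRPCs(s)) // true
}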
type DeleteSnapshotRequest struct {
// The ID of the snapshot to be deleted.
// This field is REQUIRED.
@@ -4500,6 +4597,7 @@ func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability
// Specifies a capability of the node service.
type NodeServiceCapability struct {
// Types that are valid to be assigned to Type:
+ //
// *NodeServiceCapability_Rpc
Type isNodeServiceCapability_Type `protobuf_oneof:"type"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
@@ -4666,8 +4764,10 @@ type NodeGetInfoResponse struct {
// no topological constraints declared for V.
//
// Example 1:
- // accessible_topology =
- // {"region": "R1", "zone": "Z2"}
+ //
+ // accessible_topology =
+ // {"region": "R1", "zone": "Z2"}
+ //
// Indicates the node exists within the "region" "R1" and the "zone"
// "Z2".
AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"`
@@ -4874,6 +4974,608 @@ func (m *NodeExpandVolumeResponse) GetCapacityBytes() int64 {
return 0
}
+type GroupControllerGetCapabilitiesRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GroupControllerGetCapabilitiesRequest) Reset() { *m = GroupControllerGetCapabilitiesRequest{} }
+func (m *GroupControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
+func (*GroupControllerGetCapabilitiesRequest) ProtoMessage() {}
+func (*GroupControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{60}
+}
+
+func (m *GroupControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Unmarshal(m, b)
+}
+func (m *GroupControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Marshal(b, m, deterministic)
+}
+func (m *GroupControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Merge(m, src)
+}
+func (m *GroupControllerGetCapabilitiesRequest) XXX_Size() int {
+ return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Size(m)
+}
+func (m *GroupControllerGetCapabilitiesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GroupControllerGetCapabilitiesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupControllerGetCapabilitiesRequest proto.InternalMessageInfo
+
+type GroupControllerGetCapabilitiesResponse struct {
+ // All the capabilities that the group controller service supports.
+ // This field is OPTIONAL.
+ Capabilities []*GroupControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GroupControllerGetCapabilitiesResponse) Reset() {
+ *m = GroupControllerGetCapabilitiesResponse{}
+}
+func (m *GroupControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*GroupControllerGetCapabilitiesResponse) ProtoMessage() {}
+func (*GroupControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{61}
+}
+
+func (m *GroupControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *GroupControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *GroupControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Merge(m, src)
+}
+func (m *GroupControllerGetCapabilitiesResponse) XXX_Size() int {
+ return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Size(m)
+}
+func (m *GroupControllerGetCapabilitiesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GroupControllerGetCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupControllerGetCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *GroupControllerGetCapabilitiesResponse) GetCapabilities() []*GroupControllerServiceCapability {
+ if m != nil {
+ return m.Capabilities
+ }
+ return nil
+}
+
+// Specifies a capability of the group controller service.
+type GroupControllerServiceCapability struct {
+ // Types that are valid to be assigned to Type:
+ //
+ // *GroupControllerServiceCapability_Rpc
+ Type isGroupControllerServiceCapability_Type `protobuf_oneof:"type"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GroupControllerServiceCapability) Reset() { *m = GroupControllerServiceCapability{} }
+func (m *GroupControllerServiceCapability) String() string { return proto.CompactTextString(m) }
+func (*GroupControllerServiceCapability) ProtoMessage() {}
+func (*GroupControllerServiceCapability) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{62}
+}
+
+func (m *GroupControllerServiceCapability) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GroupControllerServiceCapability.Unmarshal(m, b)
+}
+func (m *GroupControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GroupControllerServiceCapability.Marshal(b, m, deterministic)
+}
+func (m *GroupControllerServiceCapability) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GroupControllerServiceCapability.Merge(m, src)
+}
+func (m *GroupControllerServiceCapability) XXX_Size() int {
+ return xxx_messageInfo_GroupControllerServiceCapability.Size(m)
+}
+func (m *GroupControllerServiceCapability) XXX_DiscardUnknown() {
+ xxx_messageInfo_GroupControllerServiceCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupControllerServiceCapability proto.InternalMessageInfo
+
+type isGroupControllerServiceCapability_Type interface {
+ isGroupControllerServiceCapability_Type()
+}
+
+type GroupControllerServiceCapability_Rpc struct {
+ Rpc *GroupControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"`
+}
+
+func (*GroupControllerServiceCapability_Rpc) isGroupControllerServiceCapability_Type() {}
+
+func (m *GroupControllerServiceCapability) GetType() isGroupControllerServiceCapability_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (m *GroupControllerServiceCapability) GetRpc() *GroupControllerServiceCapability_RPC {
+ if x, ok := m.GetType().(*GroupControllerServiceCapability_Rpc); ok {
+ return x.Rpc
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*GroupControllerServiceCapability) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*GroupControllerServiceCapability_Rpc)(nil),
+ }
+}
+
+type GroupControllerServiceCapability_RPC struct {
+ Type GroupControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.GroupControllerServiceCapability_RPC_Type" json:"type,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GroupControllerServiceCapability_RPC) Reset() { *m = GroupControllerServiceCapability_RPC{} }
+func (m *GroupControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
+func (*GroupControllerServiceCapability_RPC) ProtoMessage() {}
+func (*GroupControllerServiceCapability_RPC) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{62, 0}
+}
+
+func (m *GroupControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GroupControllerServiceCapability_RPC.Unmarshal(m, b)
+}
+func (m *GroupControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GroupControllerServiceCapability_RPC.Marshal(b, m, deterministic)
+}
+func (m *GroupControllerServiceCapability_RPC) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GroupControllerServiceCapability_RPC.Merge(m, src)
+}
+func (m *GroupControllerServiceCapability_RPC) XXX_Size() int {
+ return xxx_messageInfo_GroupControllerServiceCapability_RPC.Size(m)
+}
+func (m *GroupControllerServiceCapability_RPC) XXX_DiscardUnknown() {
+ xxx_messageInfo_GroupControllerServiceCapability_RPC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupControllerServiceCapability_RPC proto.InternalMessageInfo
+
+func (m *GroupControllerServiceCapability_RPC) GetType() GroupControllerServiceCapability_RPC_Type {
+ if m != nil {
+ return m.Type
+ }
+ return GroupControllerServiceCapability_RPC_UNKNOWN
+}
+
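(Editorial sketch, not part of this patch: testing for the new group controller capability. It exercises only the types added in this hunk and assumes the standard csi import path.)

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// supportsGroupSnapshots scans a GroupControllerGetCapabilities response
// for the CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT capability.
func supportsGroupSnapshots(resp *csi.GroupControllerGetCapabilitiesResponse) bool {
	for _, c := range resp.GetCapabilities() {
		if c.GetRpc().GetType() == csi.GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT {
			return true
		}
	}
	return false
}

func main() {
	resp := &csi.GroupControllerGetCapabilitiesResponse{
		Capabilities: []*csi.GroupControllerServiceCapability{{
			Type: &csi.GroupControllerServiceCapability_Rpc{
				Rpc: &csi.GroupControllerServiceCapability_RPC{
					Type: csi.GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT,
				},
			},
		}},
	}
	fmt.Println(supportsGroupSnapshots(resp)) // true
}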
+type CreateVolumeGroupSnapshotRequest struct {
+ // The suggested name for the group snapshot. This field is REQUIRED
+ // for idempotency.
+ // Any Unicode string that conforms to the length limit is allowed
+ // except those containing the following banned characters:
+ // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+ // (These are control characters other than commonly used whitespace.)
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // volume IDs of the source volumes to be snapshotted together.
+ // This field is REQUIRED.
+ SourceVolumeIds []string `protobuf:"bytes,2,rep,name=source_volume_ids,json=sourceVolumeIds,proto3" json:"source_volume_ids,omitempty"`
+ // Secrets required by plugin to complete
+ // ControllerCreateVolumeGroupSnapshot request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ // The secrets provided in this field SHOULD be the same for
+ // all group snapshot operations on the same group snapshot.
+ Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Plugin specific parameters passed in as opaque key-value pairs.
+ // This field is OPTIONAL. The Plugin is responsible for parsing and
+ // validating these parameters. COs will treat these as opaque.
+ Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) Reset() { *m = CreateVolumeGroupSnapshotRequest{} }
+func (m *CreateVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateVolumeGroupSnapshotRequest) ProtoMessage() {}
+func (*CreateVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{63}
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Unmarshal(m, b)
+}
+func (m *CreateVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *CreateVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Merge(m, src)
+}
+func (m *CreateVolumeGroupSnapshotRequest) XXX_Size() int {
+ return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Size(m)
+}
+func (m *CreateVolumeGroupSnapshotRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateVolumeGroupSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateVolumeGroupSnapshotRequest proto.InternalMessageInfo
+
+func (m *CreateVolumeGroupSnapshotRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) GetSourceVolumeIds() []string {
+ if m != nil {
+ return m.SourceVolumeIds
+ }
+ return nil
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) GetSecrets() map[string]string {
+ if m != nil {
+ return m.Secrets
+ }
+ return nil
+}
+
+func (m *CreateVolumeGroupSnapshotRequest) GetParameters() map[string]string {
+ if m != nil {
+ return m.Parameters
+ }
+ return nil
+}
+
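(Editorial sketch, not part of this vendored file: building the request defined above. The volume IDs, secret, and parameter values are hypothetical; the Name is what makes the call idempotent.)

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// Repeating this request with the same Name must not create a
	// second group snapshot on the storage system.
	req := &csi.CreateVolumeGroupSnapshotRequest{
		Name:            "nightly-backup-2023-10-13",
		SourceVolumeIds: []string{"vol-1", "vol-2"},
		// Secrets and Parameters are opaque to the CO; the plugin is
		// responsible for parsing and validating them.
		Secrets:    map[string]string{"token": "redacted"},
		Parameters: map[string]string{"tier": "standard"},
	}
	fmt.Println(req.GetName(), len(req.GetSourceVolumeIds()))
}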
+type CreateVolumeGroupSnapshotResponse struct {
+ // Contains all attributes of the newly created group snapshot.
+ // This field is REQUIRED.
+ GroupSnapshot *VolumeGroupSnapshot `protobuf:"bytes,1,opt,name=group_snapshot,json=groupSnapshot,proto3" json:"group_snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CreateVolumeGroupSnapshotResponse) Reset() { *m = CreateVolumeGroupSnapshotResponse{} }
+func (m *CreateVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateVolumeGroupSnapshotResponse) ProtoMessage() {}
+func (*CreateVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{64}
+}
+
+func (m *CreateVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Unmarshal(m, b)
+}
+func (m *CreateVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *CreateVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Merge(m, src)
+}
+func (m *CreateVolumeGroupSnapshotResponse) XXX_Size() int {
+ return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Size(m)
+}
+func (m *CreateVolumeGroupSnapshotResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CreateVolumeGroupSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateVolumeGroupSnapshotResponse proto.InternalMessageInfo
+
+func (m *CreateVolumeGroupSnapshotResponse) GetGroupSnapshot() *VolumeGroupSnapshot {
+ if m != nil {
+ return m.GroupSnapshot
+ }
+ return nil
+}
+
+type VolumeGroupSnapshot struct {
+ // The identifier for this group snapshot, generated by the plugin.
+ // This field MUST contain enough information to uniquely identify
+ // this specific snapshot vs all other group snapshots supported by
+ // this plugin.
+ // This field SHALL be used by the CO in subsequent calls to refer to
+ // this group snapshot.
+ // The SP is NOT responsible for global uniqueness of
+ // group_snapshot_id across multiple SPs.
+ // This field is REQUIRED.
+ GroupSnapshotId string `protobuf:"bytes,1,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"`
+ // A list of snapshots belonging to this group.
+ // This field is REQUIRED.
+ Snapshots []*Snapshot `protobuf:"bytes,2,rep,name=snapshots,proto3" json:"snapshots,omitempty"`
+ // Timestamp of when the volume group snapshot was taken.
+ // This field is REQUIRED.
+ CreationTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
+ // Indicates if all individual snapshots in the group snapshot
+ // are ready to use as a `volume_content_source` in a
+ // `CreateVolumeRequest`. The default value is false.
+	// If any snapshot in the list of snapshots in this message has
+	// ready_to_use set to false, the SP MUST set this field to false.
+ // If all of the snapshots in the list of snapshots in this message
+ // have ready_to_use set to true, the SP SHOULD set this field to
+ // true.
+ // This field is REQUIRED.
+ ReadyToUse bool `protobuf:"varint,4,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VolumeGroupSnapshot) Reset() { *m = VolumeGroupSnapshot{} }
+func (m *VolumeGroupSnapshot) String() string { return proto.CompactTextString(m) }
+func (*VolumeGroupSnapshot) ProtoMessage() {}
+func (*VolumeGroupSnapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{65}
+}
+
+func (m *VolumeGroupSnapshot) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VolumeGroupSnapshot.Unmarshal(m, b)
+}
+func (m *VolumeGroupSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VolumeGroupSnapshot.Marshal(b, m, deterministic)
+}
+func (m *VolumeGroupSnapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VolumeGroupSnapshot.Merge(m, src)
+}
+func (m *VolumeGroupSnapshot) XXX_Size() int {
+ return xxx_messageInfo_VolumeGroupSnapshot.Size(m)
+}
+func (m *VolumeGroupSnapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_VolumeGroupSnapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeGroupSnapshot proto.InternalMessageInfo
+
+func (m *VolumeGroupSnapshot) GetGroupSnapshotId() string {
+ if m != nil {
+ return m.GroupSnapshotId
+ }
+ return ""
+}
+
+func (m *VolumeGroupSnapshot) GetSnapshots() []*Snapshot {
+ if m != nil {
+ return m.Snapshots
+ }
+ return nil
+}
+
+func (m *VolumeGroupSnapshot) GetCreationTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.CreationTime
+ }
+ return nil
+}
+
+func (m *VolumeGroupSnapshot) GetReadyToUse() bool {
+ if m != nil {
+ return m.ReadyToUse
+ }
+ return false
+}
+
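(Editorial sketch, not part of this patch: the ready_to_use aggregation rule documented above, expressed as a small helper over the types added here. Assumes the standard csi import path.)

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// groupReady mirrors the documented rule: the group snapshot is ready
// only when every member snapshot reports ready_to_use.
func groupReady(g *csi.VolumeGroupSnapshot) bool {
	for _, s := range g.GetSnapshots() {
		if !s.GetReadyToUse() {
			return false
		}
	}
	return true
}

func main() {
	g := &csi.VolumeGroupSnapshot{
		GroupSnapshotId: "group-1",
		Snapshots: []*csi.Snapshot{
			{SnapshotId: "snap-1", GroupSnapshotId: "group-1", ReadyToUse: true},
			{SnapshotId: "snap-2", GroupSnapshotId: "group-1", ReadyToUse: false},
		},
	}
	fmt.Println(groupReady(g)) // false
}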
+type DeleteVolumeGroupSnapshotRequest struct {
+ // The ID of the group snapshot to be deleted.
+ // This field is REQUIRED.
+ GroupSnapshotId string `protobuf:"bytes,1,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"`
+ // A list of snapshot IDs that are part of this group snapshot.
+ // If SP does not need to rely on this field to delete the snapshots
+ // in the group, it SHOULD check this field and report an error
+ // if it has the ability to detect a mismatch.
+ // Some SPs require this list to delete the snapshots in the group.
+ // If SP needs to use this field to delete the snapshots in the
+ // group, it MUST report an error if it has the ability to detect
+ // a mismatch.
+ // This field is REQUIRED.
+ SnapshotIds []string `protobuf:"bytes,2,rep,name=snapshot_ids,json=snapshotIds,proto3" json:"snapshot_ids,omitempty"`
+ // Secrets required by plugin to complete group snapshot deletion
+ // request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ // The secrets provided in this field SHOULD be the same for
+ // all group snapshot operations on the same group snapshot.
+ Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteVolumeGroupSnapshotRequest) Reset() { *m = DeleteVolumeGroupSnapshotRequest{} }
+func (m *DeleteVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteVolumeGroupSnapshotRequest) ProtoMessage() {}
+func (*DeleteVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{66}
+}
+
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Unmarshal(m, b)
+}
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Merge(m, src)
+}
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_Size() int {
+ return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Size(m)
+}
+func (m *DeleteVolumeGroupSnapshotRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteVolumeGroupSnapshotRequest proto.InternalMessageInfo
+
+func (m *DeleteVolumeGroupSnapshotRequest) GetGroupSnapshotId() string {
+ if m != nil {
+ return m.GroupSnapshotId
+ }
+ return ""
+}
+
+func (m *DeleteVolumeGroupSnapshotRequest) GetSnapshotIds() []string {
+ if m != nil {
+ return m.SnapshotIds
+ }
+ return nil
+}
+
+func (m *DeleteVolumeGroupSnapshotRequest) GetSecrets() map[string]string {
+ if m != nil {
+ return m.Secrets
+ }
+ return nil
+}
+
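(Editorial sketch, not part of this vendored file: an SP-side check for the snapshot_ids mismatch rule described above. The "known" map stands in for whatever record the plugin keeps of the group's members and is hypothetical.)

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// checkSnapshotIDs reports a mismatch between the CO-supplied
// snapshot_ids and the plugin's own record of the group snapshot.
func checkSnapshotIDs(req *csi.DeleteVolumeGroupSnapshotRequest, known map[string]bool) error {
	for _, id := range req.GetSnapshotIds() {
		if !known[id] {
			return fmt.Errorf("snapshot %q is not part of group snapshot %q", id, req.GetGroupSnapshotId())
		}
	}
	return nil
}

func main() {
	req := &csi.DeleteVolumeGroupSnapshotRequest{
		GroupSnapshotId: "group-1",
		SnapshotIds:     []string{"snap-1", "snap-9"},
	}
	fmt.Println(checkSnapshotIDs(req, map[string]bool{"snap-1": true, "snap-2": true}))
}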
+type DeleteVolumeGroupSnapshotResponse struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteVolumeGroupSnapshotResponse) Reset() { *m = DeleteVolumeGroupSnapshotResponse{} }
+func (m *DeleteVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteVolumeGroupSnapshotResponse) ProtoMessage() {}
+func (*DeleteVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{67}
+}
+
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Unmarshal(m, b)
+}
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Merge(m, src)
+}
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_Size() int {
+ return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Size(m)
+}
+func (m *DeleteVolumeGroupSnapshotResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteVolumeGroupSnapshotResponse proto.InternalMessageInfo
+
+type GetVolumeGroupSnapshotRequest struct {
+ // The ID of the group snapshot to fetch current group snapshot
+ // information for.
+ // This field is REQUIRED.
+ GroupSnapshotId string `protobuf:"bytes,1,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"`
+ // A list of snapshot IDs that are part of this group snapshot.
+ // If SP does not need to rely on this field to get the snapshots
+ // in the group, it SHOULD check this field and report an error
+ // if it has the ability to detect a mismatch.
+ // Some SPs require this list to get the snapshots in the group.
+ // If SP needs to use this field to get the snapshots in the
+ // group, it MUST report an error if it has the ability to detect
+ // a mismatch.
+ // This field is REQUIRED.
+ SnapshotIds []string `protobuf:"bytes,2,rep,name=snapshot_ids,json=snapshotIds,proto3" json:"snapshot_ids,omitempty"`
+ // Secrets required by plugin to complete
+ // GetVolumeGroupSnapshot request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ // The secrets provided in this field SHOULD be the same for
+ // all group snapshot operations on the same group snapshot.
+ Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetVolumeGroupSnapshotRequest) Reset() { *m = GetVolumeGroupSnapshotRequest{} }
+func (m *GetVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVolumeGroupSnapshotRequest) ProtoMessage() {}
+func (*GetVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{68}
+}
+
+func (m *GetVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Unmarshal(m, b)
+}
+func (m *GetVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Marshal(b, m, deterministic)
+}
+func (m *GetVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetVolumeGroupSnapshotRequest.Merge(m, src)
+}
+func (m *GetVolumeGroupSnapshotRequest) XXX_Size() int {
+ return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Size(m)
+}
+func (m *GetVolumeGroupSnapshotRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetVolumeGroupSnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetVolumeGroupSnapshotRequest proto.InternalMessageInfo
+
+func (m *GetVolumeGroupSnapshotRequest) GetGroupSnapshotId() string {
+ if m != nil {
+ return m.GroupSnapshotId
+ }
+ return ""
+}
+
+func (m *GetVolumeGroupSnapshotRequest) GetSnapshotIds() []string {
+ if m != nil {
+ return m.SnapshotIds
+ }
+ return nil
+}
+
+func (m *GetVolumeGroupSnapshotRequest) GetSecrets() map[string]string {
+ if m != nil {
+ return m.Secrets
+ }
+ return nil
+}
+
+type GetVolumeGroupSnapshotResponse struct {
+ // This field is REQUIRED
+ GroupSnapshot *VolumeGroupSnapshot `protobuf:"bytes,1,opt,name=group_snapshot,json=groupSnapshot,proto3" json:"group_snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetVolumeGroupSnapshotResponse) Reset() { *m = GetVolumeGroupSnapshotResponse{} }
+func (m *GetVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVolumeGroupSnapshotResponse) ProtoMessage() {}
+func (*GetVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{69}
+}
+
+func (m *GetVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Unmarshal(m, b)
+}
+func (m *GetVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Marshal(b, m, deterministic)
+}
+func (m *GetVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetVolumeGroupSnapshotResponse.Merge(m, src)
+}
+func (m *GetVolumeGroupSnapshotResponse) XXX_Size() int {
+ return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Size(m)
+}
+func (m *GetVolumeGroupSnapshotResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetVolumeGroupSnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetVolumeGroupSnapshotResponse proto.InternalMessageInfo
+
+func (m *GetVolumeGroupSnapshotResponse) GetGroupSnapshot() *VolumeGroupSnapshot {
+ if m != nil {
+ return m.GroupSnapshot
+ }
+ return nil
+}
+
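(Editorial sketch, not part of this patch: polling GetVolumeGroupSnapshot until the group reports ready_to_use. The fetch function type is hypothetical, standing in for whichever client issues the RPC; only the message types added above are real.)

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// fetchGroupSnapshot stands in for whatever issues the
// GetVolumeGroupSnapshot RPC; it is not part of this generated file.
type fetchGroupSnapshot func(ctx context.Context, req *csi.GetVolumeGroupSnapshotRequest) (*csi.GetVolumeGroupSnapshotResponse, error)

// waitUntilReady polls until the group snapshot reports ready_to_use.
func waitUntilReady(ctx context.Context, fetch fetchGroupSnapshot, id string) (*csi.VolumeGroupSnapshot, error) {
	req := &csi.GetVolumeGroupSnapshotRequest{GroupSnapshotId: id}
	for {
		resp, err := fetch(ctx, req)
		if err != nil {
			return nil, err
		}
		if gs := resp.GetGroupSnapshot(); gs.GetReadyToUse() {
			return gs, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}

func main() {
	fetch := func(ctx context.Context, req *csi.GetVolumeGroupSnapshotRequest) (*csi.GetVolumeGroupSnapshotResponse, error) {
		return &csi.GetVolumeGroupSnapshotResponse{
			GroupSnapshot: &csi.VolumeGroupSnapshot{GroupSnapshotId: req.GetGroupSnapshotId(), ReadyToUse: true},
		}, nil
	}
	gs, err := waitUntilReady(context.Background(), fetch, "group-1")
	fmt.Println(gs.GetGroupSnapshotId(), err)
}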
var E_AlphaEnum = &proto.ExtensionDesc{
ExtendedType: (*descriptor.EnumOptions)(nil),
ExtensionType: (*bool)(nil),
@@ -4944,6 +5646,7 @@ func init() {
proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value)
proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value)
proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value)
+ proto.RegisterEnum("csi.v1.GroupControllerServiceCapability_RPC_Type", GroupControllerServiceCapability_RPC_Type_name, GroupControllerServiceCapability_RPC_Type_value)
proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest")
proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse")
proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry")
@@ -5046,6 +5749,21 @@ func init() {
proto.RegisterType((*NodeExpandVolumeRequest)(nil), "csi.v1.NodeExpandVolumeRequest")
proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeExpandVolumeRequest.SecretsEntry")
proto.RegisterType((*NodeExpandVolumeResponse)(nil), "csi.v1.NodeExpandVolumeResponse")
+ proto.RegisterType((*GroupControllerGetCapabilitiesRequest)(nil), "csi.v1.GroupControllerGetCapabilitiesRequest")
+ proto.RegisterType((*GroupControllerGetCapabilitiesResponse)(nil), "csi.v1.GroupControllerGetCapabilitiesResponse")
+ proto.RegisterType((*GroupControllerServiceCapability)(nil), "csi.v1.GroupControllerServiceCapability")
+ proto.RegisterType((*GroupControllerServiceCapability_RPC)(nil), "csi.v1.GroupControllerServiceCapability.RPC")
+ proto.RegisterType((*CreateVolumeGroupSnapshotRequest)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest")
+ proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest.ParametersEntry")
+ proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest.SecretsEntry")
+ proto.RegisterType((*CreateVolumeGroupSnapshotResponse)(nil), "csi.v1.CreateVolumeGroupSnapshotResponse")
+ proto.RegisterType((*VolumeGroupSnapshot)(nil), "csi.v1.VolumeGroupSnapshot")
+ proto.RegisterType((*DeleteVolumeGroupSnapshotRequest)(nil), "csi.v1.DeleteVolumeGroupSnapshotRequest")
+ proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeGroupSnapshotRequest.SecretsEntry")
+ proto.RegisterType((*DeleteVolumeGroupSnapshotResponse)(nil), "csi.v1.DeleteVolumeGroupSnapshotResponse")
+ proto.RegisterType((*GetVolumeGroupSnapshotRequest)(nil), "csi.v1.GetVolumeGroupSnapshotRequest")
+ proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetVolumeGroupSnapshotRequest.SecretsEntry")
+ proto.RegisterType((*GetVolumeGroupSnapshotResponse)(nil), "csi.v1.GetVolumeGroupSnapshotResponse")
proto.RegisterExtension(E_AlphaEnum)
proto.RegisterExtension(E_AlphaEnumValue)
proto.RegisterExtension(E_CsiSecret)
@@ -5060,245 +5778,269 @@ func init() {
}
var fileDescriptor_9cdb00adce470e01 = []byte{
- // 3796 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x3b, 0x4b, 0x6c, 0x23, 0x47,
- 0x76, 0x6a, 0xfe, 0x24, 0x3d, 0x4a, 0x1a, 0xaa, 0x28, 0x69, 0x38, 0x2d, 0x69, 0xa4, 0xe9, 0xf1,
- 0x78, 0xe5, 0xf1, 0x0c, 0x67, 0xad, 0xb5, 0x8d, 0x58, 0x1e, 0xef, 0x9a, 0xa4, 0x38, 0x12, 0x77,
- 0x28, 0x52, 0x6e, 0x52, 0x33, 0x3b, 0x93, 0x18, 0xed, 0x16, 0x59, 0xe2, 0x34, 0x4c, 0x76, 0xd3,
- 0xdd, 0x4d, 0x45, 0xda, 0x4b, 0x82, 0x04, 0x39, 0x04, 0xb9, 0xe4, 0xb6, 0xce, 0x29, 0x8b, 0x24,
- 0xc7, 0x5d, 0xec, 0x21, 0x08, 0x72, 0x0c, 0x90, 0x5b, 0x02, 0xe4, 0x73, 0x4b, 0x90, 0xcb, 0x1e,
- 0x02, 0xe4, 0x60, 0x24, 0x80, 0xcf, 0x39, 0x04, 0x41, 0x57, 0x55, 0x37, 0xfb, 0xcb, 0xcf, 0x48,
- 0x03, 0x1f, 0xf6, 0x24, 0xf6, 0xab, 0xf7, 0x5e, 0xbd, 0xaa, 0x7a, 0xef, 0xd5, 0xfb, 0x94, 0xe0,
- 0x83, 0x8e, 0x62, 0xbe, 0x1a, 0x9c, 0xe6, 0x5b, 0x5a, 0xef, 0x51, 0x4b, 0x53, 0x4d, 0x59, 0x51,
- 0xb1, 0xfe, 0xd0, 0x30, 0x35, 0x5d, 0xee, 0xe0, 0x87, 0x8a, 0x6a, 0x62, 0xfd, 0x4c, 0x6e, 0xe1,
- 0x47, 0x46, 0x1f, 0xb7, 0x1e, 0xb5, 0x0c, 0x25, 0xdf, 0xd7, 0x35, 0x53, 0x43, 0x29, 0xeb, 0xe7,
- 0xf9, 0x7b, 0xfc, 0x76, 0x47, 0xd3, 0x3a, 0x5d, 0xfc, 0x88, 0x40, 0x4f, 0x07, 0x67, 0x8f, 0xda,
- 0xd8, 0x68, 0xe9, 0x4a, 0xdf, 0xd4, 0x74, 0x8a, 0xc9, 0x6f, 0xf9, 0x31, 0x4c, 0xa5, 0x87, 0x0d,
- 0x53, 0xee, 0xf5, 0x19, 0xc2, 0x6d, 0x3f, 0xc2, 0xef, 0xea, 0x72, 0xbf, 0x8f, 0x75, 0x83, 0x8e,
- 0x0b, 0x6b, 0xb0, 0x72, 0x80, 0xcd, 0xe3, 0xee, 0xa0, 0xa3, 0xa8, 0x15, 0xf5, 0x4c, 0x13, 0xf1,
- 0x57, 0x03, 0x6c, 0x98, 0xc2, 0xbf, 0x73, 0xb0, 0xea, 0x1b, 0x30, 0xfa, 0x9a, 0x6a, 0x60, 0x84,
- 0x20, 0xa1, 0xca, 0x3d, 0x9c, 0xe3, 0xb6, 0xb9, 0x9d, 0x79, 0x91, 0xfc, 0x46, 0xf7, 0x60, 0xe9,
- 0x1c, 0xab, 0x6d, 0x4d, 0x97, 0xce, 0xb1, 0x6e, 0x28, 0x9a, 0x9a, 0x8b, 0x91, 0xd1, 0x45, 0x0a,
- 0x7d, 0x46, 0x81, 0xe8, 0x00, 0xe6, 0x7a, 0xb2, 0xaa, 0x9c, 0x61, 0xc3, 0xcc, 0xc5, 0xb7, 0xe3,
- 0x3b, 0xe9, 0xdd, 0x77, 0xf3, 0x74, 0xa9, 0xf9, 0xd0, 0xb9, 0xf2, 0x47, 0x0c, 0xbb, 0xac, 0x9a,
- 0xfa, 0xa5, 0xe8, 0x10, 0xf3, 0x1f, 0xc3, 0xa2, 0x67, 0x08, 0x65, 0x20, 0xfe, 0x25, 0xbe, 0x64,
- 0x32, 0x59, 0x3f, 0xd1, 0x0a, 0x24, 0xcf, 0xe5, 0xee, 0x00, 0x33, 0x49, 0xe8, 0xc7, 0x5e, 0xec,
- 0xb7, 0x38, 0xe1, 0x36, 0x6c, 0x38, 0xb3, 0x95, 0xe4, 0xbe, 0x7c, 0xaa, 0x74, 0x15, 0x53, 0xc1,
- 0x86, 0xbd, 0xf4, 0xcf, 0x61, 0x33, 0x62, 0x9c, 0xed, 0xc0, 0x63, 0x58, 0x68, 0xb9, 0xe0, 0x39,
- 0x8e, 0x2c, 0x25, 0x67, 0x2f, 0xc5, 0x47, 0x79, 0x29, 0x7a, 0xb0, 0x85, 0x7f, 0x8e, 0x43, 0xc6,
- 0x8f, 0x82, 0x1e, 0xc3, 0xac, 0x81, 0xf5, 0x73, 0xa5, 0x45, 0xf7, 0x35, 0xbd, 0xbb, 0x1d, 0xc5,
- 0x2d, 0xdf, 0xa0, 0x78, 0x87, 0x33, 0xa2, 0x4d, 0x82, 0x4e, 0x20, 0x73, 0xae, 0x75, 0x07, 0x3d,
- 0x2c, 0xe1, 0x8b, 0xbe, 0xac, 0x3a, 0x07, 0x90, 0xde, 0xdd, 0x89, 0x64, 0xf3, 0x8c, 0x10, 0x94,
- 0x6d, 0xfc, 0xc3, 0x19, 0xf1, 0xc6, 0xb9, 0x17, 0xc4, 0xff, 0x8c, 0x83, 0x59, 0x36, 0x1b, 0xfa,
- 0x08, 0x12, 0xe6, 0x65, 0x9f, 0x4a, 0xb7, 0xb4, 0x7b, 0x6f, 0x9c, 0x74, 0xf9, 0xe6, 0x65, 0x1f,
- 0x8b, 0x84, 0x44, 0xf8, 0x0c, 0x12, 0xd6, 0x17, 0x4a, 0xc3, 0xec, 0x49, 0xed, 0x69, 0xad, 0xfe,
- 0xbc, 0x96, 0x99, 0x41, 0x6b, 0x80, 0x4a, 0xf5, 0x5a, 0x53, 0xac, 0x57, 0xab, 0x65, 0x51, 0x6a,
- 0x94, 0xc5, 0x67, 0x95, 0x52, 0x39, 0xc3, 0xa1, 0xb7, 0x60, 0xfb, 0x59, 0xbd, 0x7a, 0x72, 0x54,
- 0x96, 0x0a, 0xa5, 0x52, 0xb9, 0xd1, 0xa8, 0x14, 0x2b, 0xd5, 0x4a, 0xf3, 0x85, 0x54, 0xaa, 0xd7,
- 0x1a, 0x4d, 0xb1, 0x50, 0xa9, 0x35, 0x1b, 0x99, 0x18, 0xff, 0x07, 0x1c, 0xdc, 0xf0, 0x2d, 0x00,
- 0x15, 0x3c, 0x12, 0x3e, 0x9c, 0x74, 0xe1, 0x6e, 0x49, 0x1f, 0x84, 0x49, 0x0a, 0x90, 0xaa, 0xd7,
- 0xaa, 0x95, 0x9a, 0x25, 0x5d, 0x1a, 0x66, 0xeb, 0x4f, 0x9e, 0x90, 0x8f, 0x58, 0x31, 0x45, 0x27,
- 0x14, 0x96, 0x60, 0xe1, 0x58, 0xd7, 0x4e, 0xb1, 0xad, 0x3f, 0x05, 0x58, 0x64, 0xdf, 0x4c, 0x5f,
- 0xbe, 0x0f, 0x49, 0x1d, 0xcb, 0xed, 0x4b, 0x76, 0xb4, 0x7c, 0x9e, 0xda, 0x64, 0xde, 0xb6, 0xc9,
- 0x7c, 0x51, 0xd3, 0xba, 0xcf, 0x2c, 0xfd, 0x14, 0x29, 0xa2, 0xf0, 0x6d, 0x02, 0xb2, 0x25, 0x1d,
- 0xcb, 0x26, 0xa6, 0xd2, 0x32, 0xd6, 0xa1, 0xb6, 0xf7, 0x18, 0x96, 0x2c, 0xfd, 0x6a, 0x29, 0xe6,
- 0xa5, 0xa4, 0xcb, 0x6a, 0x07, 0xb3, 0xa3, 0x5f, 0xb5, 0x77, 0xa0, 0xc4, 0x46, 0x45, 0x6b, 0x50,
- 0x5c, 0x6c, 0xb9, 0x3f, 0x51, 0x05, 0xb2, 0x4c, 0x75, 0x3c, 0x2a, 0x1d, 0xf7, 0xaa, 0x34, 0x95,
- 0xc2, 0xa5, 0xd2, 0xe8, 0xdc, 0x0b, 0x51, 0xb0, 0x81, 0x9e, 0x02, 0xf4, 0x65, 0x5d, 0xee, 0x61,
- 0x13, 0xeb, 0x46, 0x2e, 0xe1, 0xb5, 0xef, 0x90, 0xd5, 0xe4, 0x8f, 0x1d, 0x6c, 0x6a, 0xdf, 0x2e,
- 0x72, 0x74, 0x60, 0x19, 0x44, 0x4b, 0xc7, 0xa6, 0x91, 0x4b, 0x12, 0x4e, 0x3b, 0xa3, 0x38, 0x35,
- 0x28, 0x2a, 0x61, 0x53, 0x8c, 0x7f, 0x5d, 0xe4, 0x44, 0x9b, 0x1a, 0xd5, 0x61, 0xd5, 0x5e, 0xa0,
- 0xa6, 0x9a, 0x58, 0x35, 0x25, 0x43, 0x1b, 0xe8, 0x2d, 0x9c, 0x4b, 0x91, 0x5d, 0x5a, 0xf7, 0x2d,
- 0x91, 0xe2, 0x34, 0x08, 0x8a, 0xc8, 0xb6, 0xc6, 0x03, 0x44, 0x2f, 0x81, 0x97, 0x5b, 0x2d, 0x6c,
- 0x18, 0x0a, 0xdd, 0x0b, 0x49, 0xc7, 0x5f, 0x0d, 0x14, 0x1d, 0xf7, 0xb0, 0x6a, 0x1a, 0xb9, 0x59,
- 0x2f, 0xd7, 0xa6, 0xd6, 0xd7, 0xba, 0x5a, 0xe7, 0x52, 0x1c, 0xe2, 0x88, 0xb7, 0x3c, 0xe4, 0xae,
- 0x11, 0x83, 0xff, 0x04, 0x6e, 0xf8, 0x36, 0x65, 0x1a, 0xcf, 0xc6, 0xef, 0xc1, 0x82, 0x7b, 0x27,
- 0xa6, 0xf2, 0x8a, 0x7f, 0x12, 0x83, 0x6c, 0xc8, 0x1e, 0xa0, 0x43, 0x98, 0x33, 0x54, 0xb9, 0x6f,
- 0xbc, 0xd2, 0x4c, 0xa6, 0xbf, 0xf7, 0x47, 0x6c, 0x59, 0xbe, 0xc1, 0x70, 0xe9, 0xe7, 0xe1, 0x8c,
- 0xe8, 0x50, 0xa3, 0x22, 0xa4, 0xe8, 0x7e, 0xfa, 0x7d, 0x53, 0x18, 0x1f, 0x0a, 0x73, 0xb8, 0x30,
- 0x4a, 0xfe, 0x3d, 0x58, 0xf2, 0xce, 0x80, 0xb6, 0x20, 0x6d, 0xcf, 0x20, 0x29, 0x6d, 0xb6, 0x56,
- 0xb0, 0x41, 0x95, 0x36, 0xff, 0x2e, 0x2c, 0xb8, 0x99, 0xa1, 0x75, 0x98, 0x67, 0x0a, 0xe1, 0xa0,
- 0xcf, 0x51, 0x40, 0xa5, 0xed, 0xd8, 0xf4, 0x0f, 0x61, 0xc5, 0xab, 0x67, 0xcc, 0x94, 0xdf, 0x76,
- 0xd6, 0x40, 0xf7, 0x62, 0xc9, 0xbb, 0x06, 0x5b, 0x4e, 0xe1, 0xcf, 0x93, 0x90, 0xf1, 0x1b, 0x0d,
- 0x7a, 0x0c, 0xc9, 0xd3, 0xae, 0xd6, 0xfa, 0x92, 0xd1, 0xbe, 0x15, 0x65, 0x5d, 0xf9, 0xa2, 0x85,
- 0x45, 0xa1, 0x87, 0x33, 0x22, 0x25, 0xb2, 0xa8, 0x7b, 0xda, 0x40, 0x35, 0xd9, 0xee, 0x45, 0x53,
- 0x1f, 0x59, 0x58, 0x43, 0x6a, 0x42, 0x84, 0xf6, 0x21, 0x4d, 0xd5, 0x4e, 0xea, 0x69, 0x6d, 0x9c,
- 0x8b, 0x13, 0x1e, 0x77, 0x23, 0x79, 0x14, 0x08, 0xee, 0x91, 0xd6, 0xc6, 0x22, 0xc8, 0xce, 0x6f,
- 0x7e, 0x11, 0xd2, 0x2e, 0xd9, 0xf8, 0x01, 0xa4, 0x5d, 0x93, 0xa1, 0x9b, 0x30, 0x7b, 0x66, 0x48,
- 0x8e, 0x13, 0x9e, 0x17, 0x53, 0x67, 0x06, 0xf1, 0xa7, 0x5b, 0x90, 0x26, 0x52, 0x48, 0x67, 0x5d,
- 0xb9, 0x63, 0xe4, 0x62, 0xdb, 0x71, 0xeb, 0x8c, 0x08, 0xe8, 0x89, 0x05, 0x41, 0x0f, 0x80, 0x39,
- 0x14, 0x89, 0xe2, 0x75, 0x74, 0x6d, 0xd0, 0x27, 0x42, 0xce, 0x8b, 0xec, 0x6a, 0x23, 0x13, 0x1d,
- 0x58, 0x70, 0xfe, 0xaf, 0x63, 0x00, 0x43, 0x01, 0xd1, 0x63, 0x48, 0x90, 0x35, 0x51, 0xc7, 0xbf,
- 0x33, 0xc1, 0x9a, 0xf2, 0x64, 0x61, 0x84, 0x4a, 0xf8, 0x2f, 0x0e, 0x12, 0x84, 0x8d, 0xff, 0x7a,
- 0x6a, 0x54, 0x6a, 0x07, 0xd5, 0xb2, 0x54, 0xab, 0xef, 0x97, 0xa5, 0xe7, 0x62, 0xa5, 0x59, 0x16,
- 0x33, 0x1c, 0x5a, 0x87, 0x9b, 0x6e, 0xb8, 0x58, 0x2e, 0xec, 0x97, 0x45, 0xa9, 0x5e, 0xab, 0xbe,
- 0xc8, 0xc4, 0x10, 0x0f, 0x6b, 0x47, 0x27, 0xd5, 0x66, 0x25, 0x38, 0x16, 0x47, 0x1b, 0x90, 0x73,
- 0x8d, 0x31, 0x1e, 0x8c, 0x6d, 0xc2, 0x62, 0xeb, 0x1a, 0xa5, 0x3f, 0xd9, 0x60, 0x12, 0x09, 0x70,
- 0xcb, 0x3d, 0xa7, 0x97, 0x36, 0xc5, 0xc7, 0x7f, 0x5e, 0xe4, 0xd0, 0x1d, 0xc8, 0xb9, 0x71, 0x3c,
- 0x1c, 0x66, 0x09, 0x4a, 0x71, 0xd1, 0xd1, 0x00, 0xa2, 0xe1, 0xcf, 0x61, 0xd1, 0x73, 0x31, 0x58,
- 0x31, 0x1c, 0xf3, 0x64, 0x6d, 0xe9, 0xf4, 0xd2, 0x24, 0x71, 0x0d, 0xb7, 0x13, 0x17, 0x17, 0x6d,
- 0x68, 0xd1, 0x02, 0x5a, 0x67, 0xd9, 0x55, 0x7a, 0x8a, 0xc9, 0x70, 0x62, 0x04, 0x07, 0x08, 0x88,
- 0x20, 0x08, 0xbf, 0x8e, 0x41, 0x8a, 0x29, 0xc4, 0x3d, 0xd7, 0xd5, 0xe4, 0x61, 0x69, 0x43, 0x29,
- 0x4b, 0x8f, 0x45, 0xc6, 0xbc, 0x16, 0x89, 0x0e, 0x61, 0xc9, 0xed, 0xbf, 0x2f, 0xec, 0xc8, 0xf1,
- 0x8e, 0xf7, 0x9c, 0xdd, 0x4e, 0xe4, 0x82, 0xc5, 0x8b, 0x8b, 0xe7, 0x6e, 0x18, 0x2a, 0xc2, 0x92,
- 0xef, 0x0a, 0x48, 0x8c, 0xbf, 0x02, 0x16, 0x5b, 0x1e, 0x6f, 0x58, 0x80, 0xac, 0xed, 0xbd, 0xbb,
- 0x58, 0x32, 0x99, 0x77, 0x67, 0x57, 0x54, 0x26, 0xe0, 0xf5, 0xd1, 0x10, 0xd9, 0x86, 0xf1, 0x9f,
- 0x02, 0x0a, 0xca, 0x3a, 0x95, 0xab, 0x1e, 0x40, 0x36, 0xe4, 0x5e, 0x41, 0x79, 0x98, 0x27, 0x47,
- 0x65, 0x28, 0x26, 0x66, 0x31, 0x69, 0x50, 0xa2, 0x21, 0x8a, 0x85, 0xdf, 0xd7, 0xf1, 0x19, 0xd6,
- 0x75, 0xdc, 0x26, 0x36, 0x19, 0x8a, 0xef, 0xa0, 0x08, 0x7f, 0xc8, 0xc1, 0x9c, 0x0d, 0x47, 0x7b,
- 0x30, 0x67, 0xe0, 0x0e, 0xbd, 0xf3, 0xe8, 0x5c, 0xb7, 0xfd, 0xb4, 0xf9, 0x06, 0x43, 0x60, 0xd1,
- 0xbb, 0x8d, 0x6f, 0x45, 0xef, 0x9e, 0xa1, 0xa9, 0x16, 0xff, 0xb7, 0x1c, 0x64, 0xf7, 0x71, 0x17,
- 0xfb, 0x43, 0xa3, 0x51, 0x6e, 0xdd, 0x1d, 0x4d, 0xc4, 0xbc, 0xd1, 0x44, 0x08, 0xab, 0x11, 0xd1,
- 0xc4, 0x95, 0x6e, 0xd8, 0x35, 0x58, 0xf1, 0xce, 0x46, 0xef, 0x14, 0xe1, 0x7f, 0xe2, 0x70, 0xdb,
- 0xd2, 0x05, 0x5d, 0xeb, 0x76, 0xb1, 0x7e, 0x3c, 0x38, 0xed, 0x2a, 0xc6, 0xab, 0x29, 0x16, 0x77,
- 0x13, 0x66, 0x55, 0xad, 0xed, 0x32, 0x9e, 0x94, 0xf5, 0x59, 0x69, 0xa3, 0x32, 0x2c, 0xfb, 0x63,
- 0xbb, 0x4b, 0xe6, 0xf9, 0xa3, 0x23, 0xbb, 0xcc, 0xb9, 0xff, 0xda, 0xe2, 0x61, 0xce, 0x8a, 0x4a,
- 0x35, 0xb5, 0x7b, 0x49, 0x2c, 0x66, 0x4e, 0x74, 0xbe, 0x91, 0xe8, 0x0f, 0xd3, 0x7e, 0xe0, 0x84,
- 0x69, 0x23, 0x57, 0x34, 0x2a, 0x62, 0xfb, 0x22, 0x60, 0xf1, 0x29, 0xc2, 0xfa, 0xa3, 0x09, 0x59,
- 0x8f, 0xf5, 0x04, 0x57, 0x39, 0xc5, 0x6b, 0x30, 0xdf, 0x7f, 0xe4, 0x60, 0x2b, 0x72, 0x09, 0x2c,
- 0xce, 0x68, 0xc3, 0x8d, 0x3e, 0x1d, 0x70, 0x36, 0x81, 0x5a, 0xd9, 0xc7, 0x63, 0x37, 0x81, 0xa5,
- 0xce, 0x0c, 0xea, 0xd9, 0x86, 0xa5, 0xbe, 0x07, 0xc8, 0x17, 0x20, 0x1b, 0x82, 0x36, 0xd5, 0x62,
- 0xbe, 0xe1, 0x60, 0x7b, 0x28, 0xca, 0x89, 0xda, 0xbf, 0x3e, 0xf5, 0x6d, 0x0e, 0x75, 0x8b, 0xba,
- 0xfc, 0x0f, 0x82, 0x6b, 0x0f, 0x9f, 0xf0, 0x4d, 0x59, 0xf0, 0x5d, 0xb8, 0x33, 0x62, 0x6a, 0x66,
- 0xce, 0xbf, 0x4e, 0xc0, 0x9d, 0x67, 0x72, 0x57, 0x69, 0x3b, 0xd1, 0x63, 0x48, 0x91, 0x61, 0xf4,
- 0x96, 0xb4, 0x02, 0x16, 0x40, 0xbd, 0xd6, 0x63, 0xc7, 0x6a, 0xc7, 0xf1, 0x9f, 0xe0, 0x3a, 0xbc,
- 0xc6, 0xcc, 0xef, 0x45, 0x48, 0xe6, 0xf7, 0xd1, 0xe4, 0xb2, 0x8e, 0xca, 0x03, 0x4f, 0xfc, 0x0e,
- 0xe6, 0xc3, 0xc9, 0xf9, 0x8e, 0xd0, 0x82, 0x2b, 0x5b, 0xf1, 0x77, 0x99, 0xaa, 0xfd, 0x7d, 0x02,
- 0x84, 0x51, 0xab, 0x67, 0x3e, 0x44, 0x84, 0xf9, 0x96, 0xa6, 0x9e, 0x29, 0x7a, 0x0f, 0xb7, 0x59,
- 0xca, 0xf1, 0xfe, 0x24, 0x9b, 0xc7, 0x1c, 0x48, 0xc9, 0xa6, 0x15, 0x87, 0x6c, 0x50, 0x0e, 0x66,
- 0x7b, 0xd8, 0x30, 0xe4, 0x8e, 0x2d, 0x96, 0xfd, 0xc9, 0xff, 0x32, 0x0e, 0xf3, 0x0e, 0x09, 0x52,
- 0x03, 0x1a, 0x4c, 0xdd, 0xd7, 0xc1, 0xeb, 0x08, 0xf0, 0xfa, 0xca, 0x1c, 0x7b, 0x0d, 0x65, 0x6e,
- 0x7b, 0x94, 0x99, 0x9a, 0xc3, 0xfe, 0x6b, 0x89, 0x3d, 0x42, 0xaf, 0xbf, 0x73, 0x05, 0x14, 0x7e,
- 0x07, 0x50, 0x55, 0x31, 0x58, 0xea, 0xe6, 0xb8, 0x25, 0x2b, 0x53, 0x93, 0x2f, 0x24, 0xac, 0x9a,
- 0xba, 0xc2, 0xc2, 0xf5, 0xa4, 0x08, 0x3d, 0xf9, 0xa2, 0x4c, 0x21, 0x56, 0x48, 0x6f, 0x98, 0xb2,
- 0x6e, 0x2a, 0x6a, 0x47, 0x32, 0xb5, 0x2f, 0xb1, 0x53, 0xe9, 0xb5, 0xa1, 0x4d, 0x0b, 0x28, 0xfc,
- 0x77, 0x0c, 0xb2, 0x1e, 0xf6, 0x4c, 0x27, 0x3f, 0x86, 0xd9, 0x21, 0x6f, 0x4f, 0x18, 0x1f, 0x82,
- 0x9d, 0xa7, 0xdb, 0x66, 0x53, 0xa0, 0x4d, 0x00, 0x15, 0x5f, 0x98, 0x9e, 0x79, 0xe7, 0x2d, 0x08,
- 0x99, 0x93, 0xff, 0x23, 0xce, 0xc9, 0xf4, 0x4d, 0xd9, 0x1c, 0x90, 0xac, 0x92, 0xb9, 0x68, 0xdc,
- 0x96, 0xd8, 0x1d, 0x43, 0xe7, 0x9d, 0x17, 0x33, 0xce, 0x48, 0x8d, 0xdc, 0x36, 0x06, 0x3a, 0x70,
- 0x8a, 0xa8, 0x2d, 0x4d, 0x6d, 0x2b, 0xe6, 0xb0, 0x88, 0x7a, 0x33, 0x90, 0x20, 0xd0, 0xe1, 0xa2,
- 0x95, 0x57, 0xd9, 0x65, 0x53, 0x07, 0xca, 0x7f, 0x05, 0x49, 0x7a, 0x1c, 0x13, 0x16, 0x0b, 0xd0,
- 0xa7, 0x90, 0x32, 0x88, 0xc4, 0xfe, 0xc2, 0x48, 0xd8, 0x9e, 0xb8, 0x57, 0x28, 0x32, 0x3a, 0xe1,
- 0x87, 0xc0, 0x0f, 0x2f, 0xa6, 0x03, 0x6c, 0x4e, 0x7e, 0xfd, 0xee, 0x59, 0x6b, 0x10, 0x7e, 0x16,
- 0x83, 0xf5, 0x50, 0x06, 0xd3, 0x95, 0x3d, 0xd0, 0xa1, 0x6f, 0x25, 0xdf, 0x0f, 0xde, 0xd8, 0x01,
- 0xe6, 0xa1, 0x2b, 0xe2, 0x7f, 0xff, 0x6a, 0x87, 0x59, 0x9c, 0xfa, 0x30, 0x03, 0xe7, 0x48, 0x77,
- 0xe6, 0x97, 0x31, 0x40, 0x07, 0xd8, 0x74, 0x52, 0x65, 0xb6, 0xa5, 0x11, 0xfe, 0x86, 0x7b, 0x0d,
- 0x7f, 0xf3, 0x63, 0x8f, 0xbf, 0xa1, 0x1e, 0xeb, 0xbe, 0xab, 0x2d, 0xe2, 0x9b, 0x7a, 0xe4, 0x6d,
- 0x19, 0x91, 0x9e, 0xd2, 0x98, 0x7f, 0xb2, 0xf4, 0xf4, 0x8a, 0x6e, 0xe5, 0x3f, 0x39, 0xc8, 0x7a,
- 0x84, 0x66, 0x1a, 0xf4, 0x10, 0x90, 0x7c, 0x2e, 0x2b, 0x5d, 0xd9, 0x12, 0xcc, 0x4e, 0xff, 0x59,
- 0x39, 0x60, 0xd9, 0x19, 0xb1, 0xc9, 0xd0, 0x53, 0xc8, 0xf6, 0xe4, 0x0b, 0xa5, 0x37, 0xe8, 0x49,
- 0x6c, 0x9f, 0x0d, 0xe5, 0xa7, 0x76, 0xe1, 0x70, 0x3d, 0x50, 0x40, 0xaf, 0xa8, 0xe6, 0x87, 0xef,
- 0xd3, 0x0a, 0xfa, 0x32, 0xa3, 0x63, 0xca, 0xa3, 0xfc, 0x14, 0xa3, 0x63, 0xc8, 0xf6, 0x14, 0x35,
- 0xc0, 0x2c, 0x3e, 0x96, 0x19, 0x35, 0xf0, 0x65, 0x46, 0x3c, 0xe4, 0x28, 0x08, 0xee, 0xa0, 0x97,
- 0x2d, 0xd7, 0xdf, 0x46, 0xea, 0xba, 0x83, 0xc5, 0x00, 0x0e, 0xdb, 0x96, 0x83, 0xd0, 0x56, 0xd2,
- 0xdd, 0xa0, 0xd9, 0xb0, 0xbe, 0x4a, 0x64, 0x57, 0xe9, 0xff, 0xe2, 0x6e, 0x0b, 0x0e, 0x60, 0xa3,
- 0x8f, 0x21, 0xae, 0xf7, 0x5b, 0xcc, 0x7c, 0xbf, 0x37, 0x01, 0xff, 0xbc, 0x78, 0x5c, 0x3a, 0x9c,
- 0x11, 0x2d, 0x2a, 0xfe, 0xcf, 0xe2, 0x10, 0x17, 0x8f, 0x4b, 0xe8, 0x53, 0x4f, 0x8b, 0xe5, 0xc1,
- 0x84, 0x5c, 0xdc, 0x1d, 0x96, 0x7f, 0x89, 0x85, 0xb5, 0x58, 0x72, 0xb0, 0x52, 0x12, 0xcb, 0x85,
- 0x66, 0x59, 0xda, 0x2f, 0x57, 0xcb, 0xcd, 0xb2, 0x44, 0x5b, 0x40, 0x19, 0x0e, 0x6d, 0x40, 0xee,
- 0xf8, 0xa4, 0x58, 0xad, 0x34, 0x0e, 0xa5, 0x93, 0x9a, 0xfd, 0x8b, 0x8d, 0xc6, 0x50, 0x06, 0x16,
- 0xaa, 0x95, 0x46, 0x93, 0x01, 0x1a, 0x99, 0xb8, 0x05, 0x39, 0x28, 0x37, 0xa5, 0x52, 0xe1, 0xb8,
- 0x50, 0xaa, 0x34, 0x5f, 0x64, 0x12, 0x88, 0x87, 0x35, 0x2f, 0xef, 0x46, 0xad, 0x70, 0xdc, 0x38,
- 0xac, 0x37, 0x33, 0x49, 0x84, 0x60, 0x89, 0xd0, 0xdb, 0xa0, 0x46, 0x26, 0x65, 0x71, 0x28, 0x55,
- 0xeb, 0x35, 0x47, 0x86, 0x59, 0xb4, 0x02, 0x19, 0x7b, 0x66, 0xb1, 0x5c, 0xd8, 0x27, 0x05, 0xbd,
- 0x39, 0xb4, 0x0c, 0x8b, 0xe5, 0x9f, 0x1c, 0x17, 0x6a, 0xfb, 0x36, 0xe2, 0x3c, 0xda, 0x86, 0x0d,
- 0xb7, 0x38, 0x12, 0xa3, 0x2a, 0xef, 0x93, 0xa2, 0x5c, 0x23, 0x03, 0xe8, 0x16, 0x64, 0x58, 0x77,
- 0xab, 0x54, 0xaf, 0xed, 0x57, 0x9a, 0x95, 0x7a, 0x2d, 0x93, 0xa6, 0x15, 0xbc, 0x2c, 0x80, 0x25,
- 0x39, 0x63, 0xb6, 0x30, 0xbe, 0xac, 0xb7, 0x48, 0xcb, 0x7a, 0x76, 0xc5, 0xfa, 0x9b, 0x18, 0xac,
- 0xd2, 0x92, 0xb5, 0x5d, 0x20, 0xb7, 0x7d, 0xd5, 0x0e, 0x64, 0x68, 0xbd, 0x4b, 0xf2, 0xdf, 0x02,
- 0x4b, 0x14, 0xfe, 0xcc, 0xce, 0x3b, 0xec, 0xf6, 0x52, 0xcc, 0xd5, 0x5e, 0xaa, 0xf8, 0xb3, 0xb0,
- 0xfb, 0xde, 0x46, 0x8c, 0x6f, 0xb6, 0x51, 0x89, 0xfd, 0x51, 0x48, 0x9a, 0xf0, 0x70, 0x34, 0xb7,
- 0x51, 0x21, 0xd4, 0x55, 0xb2, 0xf8, 0x2b, 0x7a, 0xb9, 0x27, 0xb0, 0xe6, 0x97, 0x97, 0x19, 0xf4,
- 0x83, 0x40, 0xbb, 0xc4, 0x71, 0xbb, 0x0e, 0xae, 0x83, 0x21, 0xfc, 0x1b, 0x07, 0x73, 0x36, 0xd8,
- 0x0a, 0x6f, 0x2c, 0xbf, 0xe4, 0xa9, 0x94, 0xce, 0x5b, 0x10, 0xa7, 0xf0, 0xea, 0x6e, 0x74, 0xc4,
- 0xfc, 0x8d, 0x8e, 0xd0, 0x73, 0x8e, 0x87, 0x9e, 0xf3, 0x8f, 0x60, 0xb1, 0x65, 0x89, 0xaf, 0x68,
- 0xaa, 0x64, 0x2a, 0x3d, 0xbb, 0x10, 0x1a, 0x6c, 0x4c, 0x36, 0xed, 0xd7, 0x04, 0xe2, 0x82, 0x4d,
- 0x60, 0x81, 0xd0, 0x36, 0x2c, 0x90, 0x46, 0xa5, 0x64, 0x6a, 0xd2, 0xc0, 0xc0, 0xb9, 0x24, 0x29,
- 0x0b, 0x01, 0x81, 0x35, 0xb5, 0x13, 0x03, 0x0b, 0x7f, 0xc7, 0xc1, 0x2a, 0xad, 0x76, 0xf9, 0xd5,
- 0x71, 0x5c, 0xc3, 0xc6, 0xad, 0x71, 0xbe, 0xdb, 0x30, 0x94, 0xe1, 0x9b, 0x4a, 0xf6, 0x73, 0xb0,
- 0xe6, 0x9f, 0x8f, 0x65, 0xf8, 0xbf, 0x8a, 0xc1, 0x8a, 0x15, 0x9a, 0xd9, 0x03, 0xd7, 0x1d, 0x3d,
- 0x4f, 0x71, 0x92, 0xbe, 0xcd, 0x4c, 0x04, 0x36, 0xf3, 0xd0, 0x9f, 0x3f, 0xbf, 0xe3, 0x0e, 0x2e,
- 0xfd, 0x2b, 0x78, 0x53, 0x7b, 0xf9, 0x0b, 0x0e, 0x56, 0x7d, 0xf3, 0x31, 0x7b, 0xf9, 0xc4, 0x9f,
- 0x10, 0xdc, 0x8d, 0x90, 0xef, 0xb5, 0x52, 0x82, 0x0f, 0xec, 0x50, 0x7c, 0x3a, 0xb3, 0xfc, 0xd7,
- 0x18, 0x6c, 0x0e, 0x2f, 0x35, 0xf2, 0x54, 0xa0, 0x3d, 0x45, 0x45, 0xeb, 0x6a, 0x1d, 0xf9, 0xcf,
- 0xfc, 0x0e, 0x77, 0x37, 0x78, 0xcf, 0x86, 0x88, 0x34, 0xca, 0xf1, 0x86, 0x16, 0x82, 0x13, 0xd3,
- 0x16, 0x82, 0xaf, 0xa4, 0x01, 0xbf, 0xe7, 0xae, 0x71, 0x7b, 0xc5, 0x67, 0x9a, 0x30, 0x61, 0xb3,
- 0xe8, 0x43, 0xb8, 0x49, 0xa2, 0x7f, 0xe7, 0xa5, 0x8b, 0xdd, 0x7f, 0xa7, 0x2e, 0x71, 0x4e, 0x5c,
- 0xb5, 0x86, 0x9d, 0xe7, 0x1d, 0xac, 0x41, 0xd2, 0x16, 0xbe, 0x4d, 0xc0, 0x9a, 0x95, 0x1d, 0x34,
- 0x4c, 0xb9, 0x33, 0x4d, 0xeb, 0xe0, 0xb7, 0x83, 0x95, 0xd8, 0x98, 0xf7, 0x58, 0xc2, 0xb9, 0x4e,
- 0x52, 0x80, 0x45, 0x79, 0xc8, 0x1a, 0xa6, 0xdc, 0x21, 0xee, 0x40, 0xd6, 0x3b, 0xd8, 0x94, 0xfa,
- 0xb2, 0xf9, 0x8a, 0xd9, 0xfa, 0x32, 0x1b, 0x6a, 0x92, 0x91, 0x63, 0xd9, 0x7c, 0x75, 0x4d, 0x07,
- 0x89, 0x7e, 0xec, 0x77, 0x0a, 0xef, 0x8e, 0x59, 0xcb, 0x08, 0xdd, 0xfa, 0x49, 0x44, 0xb5, 0xfe,
- 0xbd, 0x31, 0x2c, 0xc7, 0x57, 0xe9, 0xaf, 0x5e, 0x9d, 0xfe, 0x8e, 0x0b, 0xfd, 0xb7, 0xe0, 0x66,
- 0x60, 0xf1, 0xec, 0x0a, 0xe9, 0x40, 0xce, 0x1a, 0x3a, 0x51, 0x8d, 0x29, 0xd5, 0x31, 0x42, 0x63,
- 0x62, 0x11, 0x1a, 0x23, 0xac, 0xc3, 0xad, 0x90, 0x89, 0x98, 0x14, 0x7f, 0x93, 0xa4, 0x62, 0x4c,
- 0xdf, 0x73, 0xfa, 0x3c, 0xca, 0x2a, 0xde, 0x77, 0x1f, 0x7b, 0x68, 0x7b, 0xe6, 0x4d, 0xd8, 0xc5,
- 0x16, 0xa4, 0xdd, 0x78, 0xec, 0x1a, 0x34, 0xc7, 0x18, 0x4e, 0xf2, 0x4a, 0xad, 0xb0, 0x94, 0xaf,
- 0x15, 0x56, 0x1d, 0x1a, 0xd5, 0xac, 0x37, 0xb4, 0x8d, 0xdc, 0x8a, 0x11, 0x66, 0xf5, 0x32, 0x60,
- 0x56, 0x73, 0xde, 0xfe, 0x5a, 0x24, 0xd3, 0xdf, 0x00, 0xc3, 0x62, 0x4a, 0x1d, 0xda, 0xf8, 0x12,
- 0x5e, 0x02, 0x4f, 0x35, 0x7e, 0xfa, 0x56, 0x94, 0x4f, 0x8d, 0x62, 0x7e, 0x35, 0x12, 0x36, 0x61,
- 0x3d, 0x94, 0x37, 0x9b, 0xfa, 0x8f, 0x39, 0x2a, 0x98, 0x53, 0xe3, 0x6a, 0x98, 0xb2, 0x69, 0x4c,
- 0x3a, 0x35, 0x1b, 0x74, 0x4f, 0x4d, 0x41, 0x44, 0x83, 0xa7, 0x34, 0x09, 0xe1, 0x4f, 0x39, 0xba,
- 0x0f, 0x7e, 0x59, 0xd8, 0x6d, 0xfb, 0x0e, 0x24, 0x07, 0xa4, 0x8c, 0x4f, 0xa3, 0xae, 0xac, 0xd7,
- 0x08, 0x4e, 0xac, 0x21, 0x91, 0x62, 0x5c, 0x5b, 0x61, 0x54, 0xf8, 0x15, 0x07, 0x69, 0x17, 0x7f,
- 0xb4, 0x01, 0xf3, 0x4e, 0xe5, 0xc7, 0xce, 0x77, 0x1c, 0x80, 0x75, 0xfc, 0xa6, 0x66, 0xca, 0x5d,
- 0xf6, 0xc4, 0x84, 0x7e, 0x58, 0x29, 0xea, 0xc0, 0xc0, 0x34, 0x1c, 0x8e, 0x8b, 0xe4, 0x37, 0x7a,
- 0x00, 0x89, 0x81, 0xaa, 0x98, 0xc4, 0xec, 0x97, 0xfc, 0xf6, 0x4c, 0xa6, 0xca, 0x9f, 0xa8, 0x8a,
- 0x29, 0x12, 0x2c, 0xe1, 0x3e, 0x24, 0xac, 0x2f, 0x6f, 0x05, 0x62, 0x1e, 0x92, 0xc5, 0x17, 0xcd,
- 0x72, 0x23, 0xc3, 0x21, 0x80, 0x54, 0x85, 0xe6, 0xeb, 0x31, 0xa1, 0x6a, 0x3f, 0x33, 0x75, 0x16,
- 0x61, 0xb9, 0x00, 0xf9, 0x54, 0xd5, 0xf4, 0x9e, 0xdc, 0x25, 0x32, 0xcf, 0x89, 0xce, 0x77, 0x74,
- 0x77, 0x84, 0xd6, 0x12, 0x37, 0x9c, 0x13, 0x09, 0xab, 0x17, 0x7d, 0x41, 0x75, 0x2b, 0xaa, 0x52,
- 0x54, 0x08, 0xad, 0x14, 0x6d, 0x7a, 0x6e, 0xd9, 0x31, 0x35, 0xa2, 0x7f, 0x88, 0xc1, 0x6a, 0x28,
- 0x1e, 0xfa, 0xc0, 0x5d, 0x1d, 0xba, 0x33, 0x92, 0xa7, 0xbb, 0x2e, 0xf4, 0x2d, 0x47, 0xeb, 0x42,
- 0x7b, 0x9e, 0xba, 0xd0, 0xdb, 0x63, 0xe9, 0xdd, 0x15, 0xa1, 0x5f, 0x70, 0x11, 0x15, 0xa1, 0x46,
- 0xb3, 0x70, 0x50, 0x96, 0x4e, 0x6a, 0xf4, 0xaf, 0x53, 0x11, 0x5a, 0x81, 0xcc, 0xb0, 0x4e, 0x22,
- 0x35, 0x9a, 0x85, 0x66, 0x23, 0x13, 0x0b, 0x56, 0x63, 0xe2, 0xa1, 0xb5, 0x96, 0xc4, 0xf8, 0xb2,
- 0x4a, 0x92, 0xa2, 0xac, 0x01, 0x62, 0xd4, 0x47, 0xf5, 0x93, 0x5a, 0x53, 0x3a, 0x10, 0xeb, 0x27,
- 0xc7, 0x99, 0x94, 0x53, 0x6e, 0x59, 0x01, 0xc4, 0x4e, 0xcb, 0xfd, 0x6a, 0xfe, 0x2f, 0x38, 0xc8,
- 0x7a, 0xc0, 0xec, 0xf0, 0x5c, 0x3d, 0x6e, 0xce, 0xd3, 0xe3, 0x7e, 0x04, 0x2b, 0x56, 0xc6, 0x48,
- 0x2d, 0xc5, 0x90, 0xfa, 0x58, 0x27, 0xb5, 0x6d, 0xa6, 0xf3, 0xcb, 0x3d, 0xf9, 0x82, 0xd5, 0xff,
- 0x8f, 0xb1, 0x6e, 0x31, 0xbe, 0x86, 0x0a, 0xaf, 0xf0, 0x75, 0x9c, 0xc6, 0x25, 0x53, 0xe7, 0x35,
- 0x63, 0x7d, 0x54, 0x30, 0xf1, 0x89, 0x4f, 0x91, 0xf8, 0x44, 0x78, 0xb8, 0xc4, 0x54, 0xc1, 0xf0,
- 0xf4, 0x77, 0x7a, 0x6d, 0x78, 0x6f, 0xd3, 0xc8, 0xf5, 0x81, 0x5b, 0x7f, 0xc7, 0x66, 0x5a, 0xa9,
- 0xaf, 0x8b, 0xdc, 0xcf, 0xaf, 0x2b, 0x4f, 0x2e, 0xd0, 0x78, 0xec, 0x0a, 0xf9, 0xd1, 0xee, 0xff,
- 0x72, 0x30, 0x57, 0x69, 0x63, 0xd5, 0xa4, 0x6b, 0x5b, 0xf4, 0xfc, 0x63, 0x05, 0xda, 0x88, 0xf8,
- 0x7f, 0x0b, 0xb2, 0x30, 0x7e, 0x73, 0xe4, 0x7f, 0x63, 0x08, 0x33, 0xe8, 0xcc, 0xf5, 0x4f, 0x21,
- 0x9e, 0x26, 0xc6, 0x5b, 0x01, 0xca, 0x10, 0x17, 0xc7, 0xdf, 0x1b, 0x83, 0xe5, 0xcc, 0xf3, 0x21,
- 0x24, 0xc9, 0x13, 0x7a, 0xb4, 0xe2, 0x3c, 0xe3, 0x77, 0xbd, 0xb0, 0xe7, 0x57, 0x7d, 0x50, 0x9b,
- 0x6e, 0xf7, 0x9f, 0xe6, 0x01, 0x86, 0x69, 0x26, 0x7a, 0x0a, 0x0b, 0xee, 0x57, 0xbc, 0x68, 0x7d,
- 0xc4, 0x1b, 0x72, 0x7e, 0x23, 0x7c, 0xd0, 0x91, 0xe9, 0x29, 0x2c, 0xb8, 0x9f, 0x6f, 0x0d, 0x99,
- 0x85, 0x3c, 0x21, 0x1b, 0x32, 0x0b, 0x7d, 0xf1, 0x35, 0x83, 0xba, 0x70, 0x33, 0xe2, 0x01, 0x0f,
- 0x7a, 0x7b, 0xb2, 0x67, 0x4e, 0xfc, 0xf7, 0x26, 0x7c, 0x09, 0x24, 0xcc, 0x20, 0x1d, 0x6e, 0x45,
- 0xbe, 0x5b, 0x41, 0x3b, 0x93, 0xbe, 0xaa, 0xe1, 0xdf, 0x99, 0x00, 0xd3, 0x99, 0x73, 0x00, 0x7c,
- 0x74, 0xb3, 0x1c, 0xbd, 0x33, 0xf1, 0x2b, 0x0e, 0xfe, 0xfe, 0xe4, 0xbd, 0x77, 0x61, 0x06, 0x1d,
- 0x42, 0xda, 0xd5, 0x35, 0x45, 0x7c, 0x68, 0x2b, 0x95, 0x32, 0x5e, 0x1f, 0xd1, 0x66, 0xa5, 0x9c,
- 0x5c, 0x8d, 0xac, 0x21, 0xa7, 0x60, 0x4b, 0x6e, 0xc8, 0x29, 0xa4, 0xf3, 0xe5, 0xdf, 0x7e, 0xdf,
- 0xfd, 0x1e, 0xb6, 0xfd, 0xe1, 0x01, 0x42, 0xd8, 0xf6, 0x47, 0x04, 0x0b, 0xc2, 0x0c, 0xfa, 0x0c,
- 0x96, 0xbc, 0x15, 0x6a, 0xb4, 0x39, 0xb2, 0xd2, 0xce, 0xdf, 0x8e, 0x1a, 0x76, 0xb3, 0xf4, 0x16,
- 0x44, 0x87, 0x2c, 0x43, 0x0b, 0xb3, 0x43, 0x96, 0x11, 0x75, 0xd4, 0x19, 0xcb, 0x3f, 0x79, 0xca,
- 0x7c, 0x43, 0xff, 0x14, 0x56, 0x9d, 0x1c, 0xfa, 0xa7, 0xd0, 0xda, 0xa0, 0x30, 0x83, 0x14, 0x58,
- 0x0b, 0xaf, 0x32, 0xa1, 0x7b, 0x13, 0x15, 0xd1, 0xf8, 0xb7, 0xc7, 0xa1, 0x39, 0x53, 0xb5, 0x20,
- 0x1b, 0xd2, 0xd4, 0x46, 0xc2, 0xc8, 0x8e, 0x37, 0x9d, 0xe4, 0xee, 0x04, 0x5d, 0x71, 0xc1, 0x8a,
- 0x42, 0x76, 0xff, 0x23, 0x09, 0x09, 0x72, 0xed, 0x37, 0xe1, 0x86, 0xaf, 0x94, 0x80, 0x6e, 0x8f,
- 0x2e, 0xb0, 0xf0, 0x5b, 0x91, 0xe3, 0xce, 0x1a, 0x5e, 0xc2, 0x72, 0xa0, 0x38, 0x80, 0xb6, 0xdd,
- 0x74, 0x61, 0x05, 0x0a, 0xfe, 0xce, 0x08, 0x0c, 0x3f, 0x6f, 0xaf, 0x6f, 0xdb, 0x1e, 0x97, 0xbd,
- 0x7a, 0x79, 0x47, 0xf9, 0xb3, 0x2f, 0x68, 0x94, 0xe5, 0xf7, 0x64, 0x82, 0x57, 0xae, 0x50, 0x1f,
- 0x76, 0x77, 0x24, 0x8e, 0x33, 0xc3, 0xe7, 0x4e, 0x78, 0xe7, 0x4a, 0x9e, 0x90, 0x47, 0xb8, 0xd0,
- 0x24, 0x8f, 0x17, 0x46, 0xa1, 0x38, 0xec, 0x9f, 0x43, 0xc6, 0x7f, 0xcf, 0xa3, 0xad, 0x31, 0x61,
- 0x07, 0xbf, 0x1d, 0x8d, 0xe0, 0xdf, 0x19, 0xbf, 0x93, 0xf1, 0x4b, 0x15, 0xe6, 0x5e, 0xee, 0x8e,
- 0xc4, 0x71, 0xbb, 0x45, 0x57, 0x84, 0x3b, 0x74, 0x8b, 0xc1, 0x68, 0x78, 0xe8, 0x16, 0x43, 0x42,
- 0x62, 0x61, 0x66, 0xef, 0x31, 0x80, 0xdc, 0xed, 0xbf, 0x92, 0x25, 0xac, 0x0e, 0x7a, 0x68, 0x23,
- 0xd0, 0x7c, 0x2a, 0xab, 0x83, 0x5e, 0xbd, 0x6f, 0x25, 0x5d, 0x46, 0xee, 0xaf, 0xe6, 0x48, 0xaa,
- 0x35, 0x4f, 0x08, 0xac, 0x81, 0xbd, 0x2a, 0x64, 0x86, 0xd4, 0x12, 0x09, 0xa1, 0xd0, 0x9d, 0x50,
- 0x1e, 0xa4, 0x95, 0xef, 0x63, 0xb4, 0xe4, 0x30, 0x22, 0xa3, 0x7b, 0x9f, 0x00, 0xb4, 0x0c, 0x45,
- 0xa2, 0x31, 0x1c, 0xda, 0x0c, 0xf0, 0x79, 0xa2, 0xe0, 0x6e, 0xdb, 0xe6, 0xf1, 0x97, 0x4c, 0x98,
- 0x96, 0xa1, 0xd0, 0x48, 0x6f, 0xef, 0x47, 0x90, 0xa6, 0xc2, 0x9c, 0x59, 0x78, 0xe3, 0xe8, 0x99,
- 0x0c, 0x74, 0xf5, 0x64, 0x64, 0xaf, 0x0c, 0x8b, 0x94, 0x01, 0x4b, 0x18, 0xd1, 0x56, 0x80, 0xc5,
- 0x11, 0x1d, 0xf1, 0x31, 0x59, 0x20, 0x64, 0x6c, 0x6c, 0xaf, 0x08, 0x0b, 0x36, 0x1b, 0xf3, 0x95,
- 0xd6, 0x46, 0xb7, 0x43, 0xb8, 0x58, 0x03, 0x3e, 0x26, 0x69, 0xc6, 0xc4, 0x1a, 0x1a, 0x8a, 0x62,
- 0xff, 0x77, 0x69, 0x50, 0x14, 0x96, 0xd4, 0x85, 0x8a, 0xc2, 0xc6, 0x8a, 0xc9, 0x97, 0xf1, 0x96,
- 0xa1, 0x9c, 0xa6, 0x08, 0xd1, 0x0f, 0xfe, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0x0a, 0xad, 0x10,
- 0x0a, 0x3d, 0x00, 0x00,
+ // 4182 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5c, 0x4d, 0x6c, 0x1b, 0x49,
+ 0x76, 0x56, 0xf3, 0x4f, 0xd2, 0xa3, 0x24, 0x53, 0xa5, 0x1f, 0xd3, 0x2d, 0x59, 0x96, 0xda, 0xe3,
+ 0x19, 0x8d, 0xc7, 0xa6, 0x67, 0xbc, 0x33, 0x83, 0x1d, 0x8d, 0x67, 0x77, 0x48, 0x89, 0x96, 0xb8,
+ 0xa6, 0x49, 0x6d, 0x93, 0xf2, 0xac, 0x9d, 0x0c, 0x7a, 0x5a, 0x64, 0x49, 0x6e, 0x0c, 0xd9, 0xcd,
+ 0xe9, 0x6e, 0x2a, 0xd6, 0xe6, 0x90, 0x64, 0x83, 0x20, 0x1b, 0xe4, 0x12, 0x24, 0x87, 0x4c, 0x4e,
+ 0x59, 0x24, 0x39, 0xee, 0x62, 0x0f, 0x41, 0x10, 0x20, 0x97, 0x00, 0xb9, 0x25, 0x40, 0x90, 0x1c,
+ 0x93, 0x5c, 0xf6, 0x10, 0x20, 0x87, 0x45, 0x02, 0x4c, 0x2e, 0x39, 0xe4, 0x10, 0x04, 0x5d, 0x55,
+ 0xfd, 0xff, 0x43, 0xd2, 0x92, 0x33, 0x01, 0xf6, 0x64, 0x75, 0xd5, 0xab, 0x57, 0xaf, 0xaa, 0xde,
+ 0x7b, 0xf5, 0xde, 0xf7, 0x8a, 0x86, 0xf7, 0x4e, 0x15, 0xf3, 0xf9, 0xf0, 0xb8, 0xd4, 0xd1, 0xfa,
+ 0xf7, 0x3a, 0x9a, 0x6a, 0xca, 0x8a, 0x8a, 0xf5, 0xbb, 0x86, 0xa9, 0xe9, 0xf2, 0x29, 0xbe, 0xab,
+ 0xa8, 0x26, 0xd6, 0x4f, 0xe4, 0x0e, 0xbe, 0x67, 0x0c, 0x70, 0xe7, 0x5e, 0xc7, 0x50, 0x4a, 0x03,
+ 0x5d, 0x33, 0x35, 0x94, 0xb3, 0xfe, 0x3c, 0x7b, 0x87, 0xdf, 0x3c, 0xd5, 0xb4, 0xd3, 0x1e, 0xbe,
+ 0x47, 0x5a, 0x8f, 0x87, 0x27, 0xf7, 0xba, 0xd8, 0xe8, 0xe8, 0xca, 0xc0, 0xd4, 0x74, 0x4a, 0xc9,
+ 0xdf, 0x08, 0x52, 0x98, 0x4a, 0x1f, 0x1b, 0xa6, 0xdc, 0x1f, 0x30, 0x82, 0x8d, 0x20, 0xc1, 0xaf,
+ 0xe8, 0xf2, 0x60, 0x80, 0x75, 0x83, 0xf6, 0x0b, 0xab, 0xb0, 0xbc, 0x8f, 0xcd, 0xc3, 0xde, 0xf0,
+ 0x54, 0x51, 0x6b, 0xea, 0x89, 0x26, 0xe2, 0x2f, 0x86, 0xd8, 0x30, 0x85, 0x7f, 0xe2, 0x60, 0x25,
+ 0xd0, 0x61, 0x0c, 0x34, 0xd5, 0xc0, 0x08, 0x41, 0x46, 0x95, 0xfb, 0xb8, 0xc8, 0x6d, 0x72, 0xdb,
+ 0xb3, 0x22, 0xf9, 0x1b, 0xdd, 0x82, 0x85, 0x33, 0xac, 0x76, 0x35, 0x5d, 0x3a, 0xc3, 0xba, 0xa1,
+ 0x68, 0x6a, 0x31, 0x45, 0x7a, 0xe7, 0x69, 0xeb, 0x13, 0xda, 0x88, 0xf6, 0x61, 0xa6, 0x2f, 0xab,
+ 0xca, 0x09, 0x36, 0xcc, 0x62, 0x7a, 0x33, 0xbd, 0x9d, 0xbf, 0xff, 0x56, 0x89, 0x2e, 0xb5, 0x14,
+ 0x39, 0x57, 0xe9, 0x31, 0xa3, 0xae, 0xaa, 0xa6, 0x7e, 0x2e, 0x3a, 0x83, 0xf9, 0x0f, 0x61, 0xde,
+ 0xd7, 0x85, 0x0a, 0x90, 0xfe, 0x1c, 0x9f, 0x33, 0x99, 0xac, 0x3f, 0xd1, 0x32, 0x64, 0xcf, 0xe4,
+ 0xde, 0x10, 0x33, 0x49, 0xe8, 0xc7, 0x4e, 0xea, 0x9b, 0x9c, 0xb0, 0x01, 0xeb, 0xce, 0x6c, 0xbb,
+ 0xf2, 0x40, 0x3e, 0x56, 0x7a, 0x8a, 0xa9, 0x60, 0xc3, 0x5e, 0xfa, 0xa7, 0x70, 0x3d, 0xa6, 0x9f,
+ 0xed, 0xc0, 0x03, 0x98, 0xeb, 0x78, 0xda, 0x8b, 0x1c, 0x59, 0x4a, 0xd1, 0x5e, 0x4a, 0x60, 0xe4,
+ 0xb9, 0xe8, 0xa3, 0x16, 0xfe, 0x33, 0x0d, 0x85, 0x20, 0x09, 0x7a, 0x00, 0xd3, 0x06, 0xd6, 0xcf,
+ 0x94, 0x0e, 0xdd, 0xd7, 0xfc, 0xfd, 0xcd, 0x38, 0x6e, 0xa5, 0x16, 0xa5, 0x3b, 0x98, 0x12, 0xed,
+ 0x21, 0xe8, 0x08, 0x0a, 0x67, 0x5a, 0x6f, 0xd8, 0xc7, 0x12, 0x7e, 0x31, 0x90, 0x55, 0xe7, 0x00,
+ 0xf2, 0xf7, 0xb7, 0x63, 0xd9, 0x3c, 0x21, 0x03, 0xaa, 0x36, 0xfd, 0xc1, 0x94, 0x78, 0xe5, 0xcc,
+ 0xdf, 0xc4, 0xff, 0x15, 0x07, 0xd3, 0x6c, 0x36, 0xf4, 0x01, 0x64, 0xcc, 0xf3, 0x01, 0x95, 0x6e,
+ 0xe1, 0xfe, 0xad, 0x51, 0xd2, 0x95, 0xda, 0xe7, 0x03, 0x2c, 0x92, 0x21, 0x82, 0x09, 0x19, 0xeb,
+ 0x0b, 0xe5, 0x61, 0xfa, 0xa8, 0xf1, 0xa8, 0xd1, 0xfc, 0xa4, 0x51, 0x98, 0x42, 0xab, 0x80, 0x76,
+ 0x9b, 0x8d, 0xb6, 0xd8, 0xac, 0xd7, 0xab, 0xa2, 0xd4, 0xaa, 0x8a, 0x4f, 0x6a, 0xbb, 0xd5, 0x02,
+ 0x87, 0x5e, 0x83, 0xcd, 0x27, 0xcd, 0xfa, 0xd1, 0xe3, 0xaa, 0x54, 0xde, 0xdd, 0xad, 0xb6, 0x5a,
+ 0xb5, 0x4a, 0xad, 0x5e, 0x6b, 0x3f, 0x95, 0x76, 0x9b, 0x8d, 0x56, 0x5b, 0x2c, 0xd7, 0x1a, 0xed,
+ 0x56, 0x21, 0x85, 0xb6, 0xa0, 0xb8, 0x2f, 0x36, 0x8f, 0x0e, 0xa5, 0x08, 0x1e, 0x69, 0x3e, 0xfd,
+ 0xa3, 0x0a, 0xc7, 0xff, 0x80, 0x83, 0x2b, 0x81, 0x35, 0xa2, 0xb2, 0x6f, 0x11, 0x77, 0xc7, 0xdd,
+ 0x1b, 0xef, 0x62, 0xee, 0x44, 0x2d, 0x06, 0x20, 0xd7, 0x6c, 0xd4, 0x6b, 0x0d, 0x6b, 0x01, 0x79,
+ 0x98, 0x6e, 0x3e, 0x7c, 0x48, 0x3e, 0x52, 0x95, 0x1c, 0x9d, 0x50, 0x58, 0x80, 0xb9, 0x43, 0x5d,
+ 0x3b, 0xc6, 0xb6, 0x8a, 0x95, 0x61, 0x9e, 0x7d, 0x33, 0x95, 0x7a, 0x1b, 0xb2, 0x3a, 0x96, 0xbb,
+ 0xe7, 0xec, 0xf4, 0xf9, 0x12, 0x35, 0xdb, 0x92, 0x6d, 0xb6, 0xa5, 0x8a, 0xa6, 0xf5, 0x9e, 0x58,
+ 0x2a, 0x2c, 0x52, 0x42, 0xe1, 0xab, 0x0c, 0x2c, 0xed, 0xea, 0x58, 0x36, 0x31, 0x95, 0x96, 0xb1,
+ 0x8e, 0x34, 0xcf, 0x07, 0xb0, 0x60, 0xa9, 0x60, 0x47, 0x31, 0xcf, 0x25, 0x5d, 0x56, 0x4f, 0x31,
+ 0xd3, 0x8e, 0x15, 0x7b, 0x07, 0x76, 0x59, 0xaf, 0x68, 0x75, 0x8a, 0xf3, 0x1d, 0xef, 0x27, 0xaa,
+ 0xc1, 0x12, 0xd3, 0x2e, 0x9f, 0xd6, 0xa7, 0xfd, 0x5a, 0x4f, 0xa5, 0xf0, 0x68, 0x3d, 0x3a, 0xf3,
+ 0xb7, 0x28, 0xd8, 0x40, 0x8f, 0x00, 0x06, 0xb2, 0x2e, 0xf7, 0xb1, 0x89, 0x75, 0xa3, 0x98, 0xf1,
+ 0xbb, 0x80, 0x88, 0xd5, 0x94, 0x0e, 0x1d, 0x6a, 0xea, 0x02, 0x3c, 0xc3, 0xd1, 0xbe, 0x65, 0x33,
+ 0x1d, 0x1d, 0x9b, 0x46, 0x31, 0x4b, 0x38, 0x6d, 0x27, 0x71, 0x6a, 0x51, 0x52, 0xc2, 0xa6, 0x92,
+ 0xfe, 0xb2, 0xc2, 0x89, 0xf6, 0x68, 0xd4, 0x84, 0x15, 0x7b, 0x81, 0x9a, 0x6a, 0x62, 0xd5, 0x94,
+ 0x0c, 0x6d, 0xa8, 0x77, 0x70, 0x31, 0x47, 0x76, 0x69, 0x2d, 0xb0, 0x44, 0x4a, 0xd3, 0x22, 0x24,
+ 0x22, 0xdb, 0x1a, 0x5f, 0x23, 0x7a, 0x06, 0xbc, 0xdc, 0xe9, 0x60, 0xc3, 0x50, 0xe8, 0x5e, 0x48,
+ 0x3a, 0xfe, 0x62, 0xa8, 0xe8, 0xb8, 0x8f, 0x55, 0xd3, 0x28, 0x4e, 0xfb, 0xb9, 0xb6, 0xb5, 0x81,
+ 0xd6, 0xd3, 0x4e, 0xcf, 0x45, 0x97, 0x46, 0xbc, 0xe6, 0x1b, 0xee, 0xe9, 0x31, 0xf8, 0x8f, 0xe0,
+ 0x4a, 0x60, 0x53, 0x26, 0x71, 0x7e, 0xfc, 0x0e, 0xcc, 0x79, 0x77, 0x62, 0x22, 0xc7, 0xf9, 0xbb,
+ 0x29, 0x58, 0x8a, 0xd8, 0x03, 0x74, 0x00, 0x33, 0x86, 0x2a, 0x0f, 0x8c, 0xe7, 0x9a, 0xc9, 0xf4,
+ 0xf7, 0x76, 0xc2, 0x96, 0x95, 0x5a, 0x8c, 0x96, 0x7e, 0x1e, 0x4c, 0x89, 0xce, 0x68, 0x54, 0x81,
+ 0x1c, 0xdd, 0xcf, 0xa0, 0xfb, 0x8a, 0xe2, 0x43, 0xdb, 0x1c, 0x2e, 0x6c, 0x24, 0xff, 0x0e, 0x2c,
+ 0xf8, 0x67, 0x40, 0x37, 0x20, 0x6f, 0xcf, 0x20, 0x29, 0x5d, 0xb6, 0x56, 0xb0, 0x9b, 0x6a, 0x5d,
+ 0xfe, 0x2d, 0x98, 0xf3, 0x32, 0x43, 0x6b, 0x30, 0xcb, 0x14, 0xc2, 0x21, 0x9f, 0xa1, 0x0d, 0xb5,
+ 0xae, 0x63, 0xd3, 0xdf, 0x82, 0x65, 0xbf, 0x9e, 0x31, 0x53, 0x7e, 0xdd, 0x59, 0x03, 0xdd, 0x8b,
+ 0x05, 0xff, 0x1a, 0x6c, 0x39, 0x85, 0x3f, 0xce, 0x42, 0x21, 0x68, 0x34, 0xe8, 0x01, 0x64, 0x8f,
+ 0x7b, 0x5a, 0xe7, 0x73, 0x36, 0xf6, 0xb5, 0x38, 0xeb, 0x2a, 0x55, 0x2c, 0x2a, 0xda, 0x7a, 0x30,
+ 0x25, 0xd2, 0x41, 0xd6, 0xe8, 0xbe, 0x36, 0x54, 0x4d, 0xb6, 0x7b, 0xf1, 0xa3, 0x1f, 0x5b, 0x54,
+ 0xee, 0x68, 0x32, 0x08, 0xed, 0x41, 0x9e, 0xaa, 0x9d, 0xd4, 0xd7, 0xba, 0xb8, 0x98, 0x26, 0x3c,
+ 0x6e, 0xc6, 0xf2, 0x28, 0x13, 0xda, 0xc7, 0x5a, 0x17, 0x8b, 0x20, 0x3b, 0x7f, 0xf3, 0xf3, 0x90,
+ 0xf7, 0xc8, 0xc6, 0x0f, 0x21, 0xef, 0x99, 0x0c, 0x5d, 0x85, 0xe9, 0x13, 0x43, 0x72, 0x9c, 0xf0,
+ 0xac, 0x98, 0x3b, 0x31, 0x88, 0x3f, 0xbd, 0x01, 0x79, 0x22, 0x85, 0x74, 0xd2, 0x93, 0x4f, 0x8d,
+ 0x62, 0x6a, 0x33, 0x6d, 0x9d, 0x11, 0x69, 0x7a, 0x68, 0xb5, 0xa0, 0x3b, 0xc0, 0x1c, 0x8a, 0x44,
+ 0xe9, 0x4e, 0x75, 0x6d, 0x38, 0x20, 0x42, 0xce, 0x8a, 0xec, 0xf6, 0x23, 0x13, 0xed, 0x5b, 0xed,
+ 0xfc, 0x9f, 0xa7, 0x00, 0x5c, 0x01, 0xd1, 0x03, 0xc8, 0x90, 0x35, 0x51, 0xc7, 0xbf, 0x3d, 0xc6,
+ 0x9a, 0x4a, 0x64, 0x61, 0x64, 0x94, 0xf0, 0x6f, 0x1c, 0x64, 0x08, 0x9b, 0xe0, 0x0d, 0xd6, 0xaa,
+ 0x35, 0xf6, 0xeb, 0x55, 0xa9, 0xd1, 0xdc, 0xab, 0x4a, 0x9f, 0x88, 0xb5, 0x76, 0x55, 0x2c, 0x70,
+ 0x68, 0x0d, 0xae, 0x7a, 0xdb, 0xc5, 0x6a, 0x79, 0xaf, 0x2a, 0x4a, 0xcd, 0x46, 0xfd, 0x69, 0x21,
+ 0x85, 0x78, 0x58, 0x7d, 0x7c, 0x54, 0x6f, 0xd7, 0xc2, 0x7d, 0x69, 0xb4, 0x0e, 0x45, 0x4f, 0x1f,
+ 0xe3, 0xc1, 0xd8, 0x66, 0x2c, 0xb6, 0x9e, 0x5e, 0xfa, 0x27, 0xeb, 0xcc, 0x22, 0x01, 0xae, 0x79,
+ 0xe7, 0xf4, 0x8f, 0xcd, 0x91, 0x0b, 0xd1, 0xba, 0x33, 0xbd, 0x34, 0x3e, 0x0e, 0xd3, 0x84, 0xa4,
+ 0x32, 0xef, 0x68, 0x00, 0xd1, 0xf0, 0x4f, 0x60, 0xde, 0x77, 0x31, 0x58, 0x61, 0x1e, 0xf3, 0x64,
+ 0x5d, 0xe9, 0xf8, 0xdc, 0x24, 0xa1, 0x0f, 0xb7, 0x9d, 0x16, 0xe7, 0xed, 0xd6, 0x8a, 0xd5, 0x68,
+ 0x9d, 0x65, 0x4f, 0xe9, 0x2b, 0x26, 0xa3, 0x49, 0x11, 0x1a, 0x20, 0x4d, 0x84, 0x40, 0xf8, 0x59,
+ 0x0a, 0x72, 0x4c, 0x21, 0x6e, 0x79, 0xae, 0x26, 0x1f, 0x4b, 0xbb, 0x95, 0xb2, 0xf4, 0x59, 0x64,
+ 0xca, 0x6f, 0x91, 0xe8, 0x00, 0x16, 0xbc, 0xfe, 0xfb, 0x85, 0x1d, 0x5c, 0x6e, 0xf9, 0xcf, 0xd9,
+ 0xeb, 0x44, 0x5e, 0xb0, 0x90, 0x72, 0xfe, 0xcc, 0xdb, 0x86, 0x2a, 0xb0, 0x10, 0xb8, 0x02, 0x32,
+ 0xa3, 0xaf, 0x80, 0xf9, 0x8e, 0xcf, 0x1b, 0x96, 0x61, 0xc9, 0xf6, 0xde, 0x3d, 0x2c, 0x99, 0xcc,
+ 0xbb, 0xb3, 0x2b, 0xaa, 0x10, 0xf2, 0xfa, 0xc8, 0x25, 0xb6, 0xdb, 0xf8, 0x8f, 0x01, 0x85, 0x65,
+ 0x9d, 0xc8, 0x55, 0x0f, 0x61, 0x29, 0xe2, 0x5e, 0x41, 0x25, 0x98, 0x25, 0x47, 0x65, 0x28, 0x26,
+ 0x66, 0x61, 0x6b, 0x58, 0x22, 0x97, 0xc4, 0xa2, 0x1f, 0xe8, 0xf8, 0x04, 0xeb, 0x3a, 0xee, 0x12,
+ 0x9b, 0x8c, 0xa4, 0x77, 0x48, 0x84, 0xdf, 0xe4, 0x60, 0xc6, 0x6e, 0x47, 0x3b, 0x30, 0x63, 0xe0,
+ 0x53, 0x7a, 0xe7, 0xd1, 0xb9, 0x36, 0x82, 0x63, 0x4b, 0x2d, 0x46, 0xc0, 0x02, 0x7c, 0x9b, 0xde,
+ 0x0a, 0xf0, 0x7d, 0x5d, 0x13, 0x2d, 0xfe, 0x2f, 0x39, 0x58, 0xda, 0xc3, 0x3d, 0x1c, 0x0c, 0x8d,
+ 0x92, 0xdc, 0xba, 0x37, 0x9a, 0x48, 0xf9, 0xa3, 0x89, 0x08, 0x56, 0x09, 0xd1, 0xc4, 0x85, 0x6e,
+ 0xd8, 0x55, 0x58, 0xf6, 0xcf, 0x46, 0xef, 0x14, 0xe1, 0x3f, 0xd2, 0xb0, 0x61, 0xe9, 0x82, 0xae,
+ 0xf5, 0x7a, 0x58, 0x3f, 0x1c, 0x1e, 0xf7, 0x14, 0xe3, 0xf9, 0x04, 0x8b, 0xbb, 0x0a, 0xd3, 0xaa,
+ 0xd6, 0xf5, 0x18, 0x4f, 0xce, 0xfa, 0xac, 0x75, 0x51, 0x15, 0x16, 0x83, 0xb1, 0xdd, 0x39, 0xf3,
+ 0xfc, 0xf1, 0x91, 0x5d, 0xe1, 0x2c, 0x78, 0x6d, 0xf1, 0x30, 0x63, 0x45, 0xa5, 0x9a, 0xda, 0x3b,
+ 0x27, 0x16, 0x33, 0x23, 0x3a, 0xdf, 0x48, 0x0c, 0x86, 0x69, 0xdf, 0x70, 0xc2, 0xb4, 0xc4, 0x15,
+ 0x25, 0x45, 0x6c, 0x9f, 0x85, 0x2c, 0x3e, 0x47, 0x58, 0x7f, 0x30, 0x26, 0xeb, 0x91, 0x9e, 0xe0,
+ 0x22, 0xa7, 0x78, 0x09, 0xe6, 0xfb, 0x77, 0x1c, 0xdc, 0x88, 0x5d, 0x02, 0x8b, 0x33, 0xba, 0x70,
+ 0x65, 0x40, 0x3b, 0x9c, 0x4d, 0xa0, 0x56, 0xf6, 0xe1, 0xc8, 0x4d, 0x60, 0xd9, 0x35, 0x6b, 0xf5,
+ 0x6d, 0xc3, 0xc2, 0xc0, 0xd7, 0xc8, 0x97, 0x61, 0x29, 0x82, 0x6c, 0xa2, 0xc5, 0xfc, 0x9c, 0x83,
+ 0x4d, 0x57, 0x94, 0x23, 0x75, 0x70, 0x79, 0xea, 0xdb, 0x76, 0x75, 0x8b, 0xba, 0xfc, 0xf7, 0xc2,
+ 0x6b, 0x8f, 0x9e, 0xf0, 0x55, 0x59, 0xf0, 0x4d, 0xd8, 0x4a, 0x98, 0x9a, 0x99, 0xf3, 0xcf, 0x32,
+ 0xb0, 0xf5, 0x44, 0xee, 0x29, 0x5d, 0x27, 0x7a, 0x8c, 0xc0, 0x21, 0x92, 0xb7, 0xa4, 0x13, 0xb2,
+ 0x00, 0xea, 0xb5, 0x1e, 0x38, 0x56, 0x3b, 0x8a, 0xff, 0x18, 0xd7, 0xe1, 0x25, 0x66, 0x7e, 0x4f,
+ 0x23, 0x32, 0xbf, 0x0f, 0xc6, 0x97, 0x35, 0x29, 0x0f, 0x3c, 0x0a, 0x3a, 0x98, 0xf7, 0xc7, 0xe7,
+ 0x9b, 0xa0, 0x05, 0x17, 0xb6, 0xe2, 0xaf, 0x33, 0x55, 0xfb, 0x9b, 0x0c, 0x08, 0x49, 0xab, 0x67,
+ 0x3e, 0x44, 0x84, 0xd9, 0x8e, 0xa6, 0x9e, 0x28, 0x7a, 0x1f, 0x77, 0x59, 0xca, 0xf1, 0xee, 0x38,
+ 0x9b, 0xc7, 0x1c, 0xc8, 0xae, 0x3d, 0x56, 0x74, 0xd9, 0xa0, 0x22, 0x4c, 0xf7, 0xb1, 0x61, 0xc8,
+ 0xa7, 0xb6, 0x58, 0xf6, 0x27, 0xff, 0x93, 0x34, 0xcc, 0x3a, 0x43, 0x90, 0x1a, 0xd2, 0x60, 0xea,
+ 0xbe, 0xf6, 0x5f, 0x46, 0x80, 0x97, 0x57, 0xe6, 0xd4, 0x4b, 0x28, 0x73, 0xd7, 0xa7, 0xcc, 0xd4,
+ 0x1c, 0xf6, 0x5e, 0x4a, 0xec, 0x04, 0xbd, 0xfe, 0xda, 0x15, 0x50, 0xf8, 0x65, 0x40, 0x75, 0xc5,
+ 0x60, 0xa9, 0x9b, 0xe3, 0x96, 0xac, 0x4c, 0x4d, 0x7e, 0x21, 0x61, 0xd5, 0xd4, 0x15, 0x16, 0xae,
+ 0x67, 0x45, 0xe8, 0xcb, 0x2f, 0xaa, 0xb4, 0xc5, 0x0a, 0xe9, 0x0d, 0x53, 0xd6, 0x4d, 0x45, 0x3d,
+ 0x95, 0x4c, 0xed, 0x73, 0xec, 0x80, 0xc1, 0x76, 0x6b, 0xdb, 0x6a, 0x14, 0xfe, 0x3d, 0x05, 0x4b,
+ 0x3e, 0xf6, 0x4c, 0x27, 0x3f, 0x84, 0x69, 0x97, 0xb7, 0x2f, 0x8c, 0x8f, 0xa0, 0x2e, 0xd1, 0x6d,
+ 0xb3, 0x47, 0xa0, 0xeb, 0x00, 0x2a, 0x7e, 0x61, 0xfa, 0xe6, 0x9d, 0xb5, 0x5a, 0xc8, 0x9c, 0xfc,
+ 0x6f, 0x71, 0x4e, 0xa6, 0x6f, 0xca, 0xe6, 0x90, 0x64, 0x95, 0xcc, 0x45, 0xe3, 0xae, 0xc4, 0xee,
+ 0x18, 0x3a, 0xef, 0xac, 0x58, 0x70, 0x7a, 0x1a, 0xe4, 0xb6, 0x31, 0xd0, 0xbe, 0x83, 0xb3, 0x76,
+ 0x34, 0xb5, 0xab, 0x98, 0x2e, 0xce, 0x7a, 0x35, 0x94, 0x20, 0xd0, 0xee, 0x8a, 0x95, 0x57, 0xd9,
+ 0xc8, 0xaa, 0xd3, 0xca, 0x7f, 0x01, 0x59, 0x7a, 0x1c, 0x63, 0x82, 0x05, 0xe8, 0x63, 0xc8, 0x19,
+ 0x44, 0xe2, 0x20, 0x30, 0x12, 0xb5, 0x27, 0xde, 0x15, 0x8a, 0x6c, 0x9c, 0xf0, 0x2d, 0xe0, 0xdd,
+ 0x8b, 0x69, 0x1f, 0x9b, 0xe3, 0x5f, 0xbf, 0x3b, 0xd6, 0x1a, 0x84, 0x3f, 0x4c, 0xc1, 0x5a, 0x24,
+ 0x83, 0xc9, 0x60, 0x0f, 0x74, 0x10, 0x58, 0xc9, 0xdb, 0xe1, 0x1b, 0x3b, 0xc4, 0x3c, 0x72, 0x45,
+ 0xfc, 0xaf, 0x5f, 0xec, 0x30, 0x2b, 0x13, 0x1f, 0x66, 0xe8, 0x1c, 0xe9, 0xce, 0xfc, 0x24, 0x05,
+ 0x68, 0x1f, 0x9b, 0x4e, 0xaa, 0xcc, 0xb6, 0x34, 0xc6, 0xdf, 0x70, 0x2f, 0xe1, 0x6f, 0xbe, 0xe3,
+ 0xf3, 0x37, 0xd4, 0x63, 0xdd, 0xf6, 0x54, 0x4e, 0x02, 0x53, 0x27, 0xde, 0x96, 0x31, 0xe9, 0x29,
+ 0x8d, 0xf9, 0xc7, 0x4b, 0x4f, 0x2f, 0xe8, 0x56, 0xfe, 0x95, 0x83, 0x25, 0x9f, 0xd0, 0x4c, 0x83,
+ 0xee, 0x02, 0x92, 0xcf, 0x64, 0xa5, 0x27, 0x5b, 0x82, 0xd9, 0xe9, 0x3f, 0x83, 0x03, 0x16, 0x9d,
+ 0x1e, 0x7b, 0x18, 0x7a, 0x04, 0x4b, 0x7d, 0xf9, 0x85, 0xd2, 0x1f, 0xf6, 0x25, 0xb6, 0xcf, 0x86,
+ 0xf2, 0x7d, 0x1b, 0x38, 0x5c, 0x0b, 0x01, 0xe8, 0x35, 0xd5, 0x7c, 0xff, 0x5d, 0x8a, 0xa0, 0x2f,
+ 0xb2, 0x71, 0x4c, 0x79, 0x94, 0xef, 0x63, 0x74, 0x08, 0x4b, 0x7d, 0x45, 0x0d, 0x31, 0x4b, 0x8f,
+ 0x64, 0x46, 0x0d, 0x7c, 0x91, 0x0d, 0x76, 0x39, 0x0a, 0x82, 0x37, 0xe8, 0x65, 0xcb, 0x0d, 0x56,
+ 0x9a, 0x7a, 0xde, 0x60, 0x31, 0x44, 0xc3, 0xb6, 0x65, 0x3f, 0xb2, 0xda, 0x74, 0x33, 0x6c, 0x36,
+ 0xac, 0xf4, 0x12, 0x5b, 0x78, 0xfa, 0x9f, 0xb4, 0xd7, 0x82, 0x43, 0xd4, 0xe8, 0x43, 0x48, 0xeb,
+ 0x83, 0x0e, 0x33, 0xdf, 0x37, 0xc6, 0xe0, 0x5f, 0x12, 0x0f, 0x77, 0x0f, 0xa6, 0x44, 0x6b, 0x14,
+ 0xff, 0x47, 0x69, 0x48, 0x8b, 0x87, 0xbb, 0xe8, 0x63, 0x5f, 0x89, 0xe5, 0xce, 0x98, 0x5c, 0xbc,
+ 0x15, 0x96, 0x7f, 0x48, 0x45, 0x95, 0x58, 0x8a, 0xb0, 0xbc, 0x2b, 0x56, 0xcb, 0xed, 0xaa, 0xb4,
+ 0x57, 0xad, 0x57, 0xdb, 0x55, 0x89, 0x56, 0x89, 0x0a, 0x1c, 0x5a, 0x87, 0xe2, 0xe1, 0x51, 0xa5,
+ 0x5e, 0x6b, 0x1d, 0x48, 0x47, 0x0d, 0xfb, 0x2f, 0xd6, 0x9b, 0x42, 0x05, 0x98, 0xab, 0xd7, 0x5a,
+ 0x6d, 0xd6, 0xd0, 0x2a, 0xa4, 0xad, 0x96, 0xfd, 0x6a, 0x5b, 0xda, 0x2d, 0x1f, 0x96, 0x77, 0x6b,
+ 0xed, 0xa7, 0x85, 0x0c, 0xe2, 0x61, 0xd5, 0xcf, 0xbb, 0xd5, 0x28, 0x1f, 0xb6, 0x0e, 0x9a, 0xed,
+ 0x42, 0x16, 0x21, 0x58, 0x20, 0xe3, 0xed, 0xa6, 0x56, 0x21, 0x67, 0x71, 0xd8, 0xad, 0x37, 0x1b,
+ 0x8e, 0x0c, 0xd3, 0x68, 0x19, 0x0a, 0xf6, 0xcc, 0x62, 0xb5, 0xbc, 0x47, 0x00, 0xbd, 0x19, 0xb4,
+ 0x08, 0xf3, 0xd5, 0xef, 0x1d, 0x96, 0x1b, 0x7b, 0x36, 0xe1, 0x2c, 0xda, 0x84, 0x75, 0xaf, 0x38,
+ 0x12, 0x1b, 0x55, 0xdd, 0x23, 0xa0, 0x5c, 0xab, 0x00, 0xe8, 0x1a, 0x14, 0x58, 0x01, 0x6c, 0xb7,
+ 0xd9, 0xd8, 0xab, 0xb5, 0x6b, 0xcd, 0x46, 0x21, 0x4f, 0x11, 0xbc, 0x25, 0x00, 0x4b, 0x72, 0xc6,
+ 0x6c, 0x6e, 0x34, 0xac, 0x37, 0x4f, 0x61, 0x3d, 0x1b, 0xb1, 0xfe, 0x79, 0x0a, 0x56, 0x28, 0x64,
+ 0x6d, 0x03, 0xe4, 0xb6, 0xaf, 0xda, 0x86, 0x02, 0xc5, 0xbb, 0xa4, 0xe0, 0x2d, 0xb0, 0x40, 0xdb,
+ 0x9f, 0xd8, 0x79, 0x87, 0x5d, 0x5e, 0x4a, 0x79, 0xca, 0x4b, 0xb5, 0x60, 0x16, 0x76, 0xdb, 0x5f,
+ 0x88, 0x09, 0xcc, 0x96, 0x94, 0xd8, 0x3f, 0x8e, 0x48, 0x13, 0xee, 0x26, 0x73, 0x4b, 0x0a, 0xa1,
+ 0x2e, 0x92, 0xc5, 0x5f, 0xd0, 0xcb, 0x3d, 0x84, 0xd5, 0xa0, 0xbc, 0xcc, 0xa0, 0xef, 0x84, 0xca,
+ 0x25, 0x8e, 0xdb, 0x75, 0x68, 0x1d, 0x0a, 0xe1, 0x87, 0x29, 0x98, 0xb1, 0x9b, 0xad, 0xf0, 0xc6,
+ 0xf2, 0x4b, 0x3e, 0xa4, 0x74, 0xd6, 0x6a, 0x71, 0x80, 0x57, 0x6f, 0xa1, 0x23, 0x15, 0x2c, 0x74,
+ 0x44, 0x9e, 0x73, 0x3a, 0xf2, 0x9c, 0xbf, 0x0d, 0xf3, 0x1d, 0x4b, 0x7c, 0x45, 0x53, 0x25, 0x53,
+ 0xe9, 0xdb, 0x40, 0x68, 0xb8, 0x30, 0xd9, 0xb6, 0x1f, 0x1c, 0x88, 0x73, 0xf6, 0x00, 0xab, 0x09,
+ 0x6d, 0xc2, 0x1c, 0x29, 0x54, 0x4a, 0xa6, 0x26, 0x0d, 0x0d, 0x5c, 0xcc, 0x12, 0x58, 0x08, 0x48,
+ 0x5b, 0x5b, 0x3b, 0x32, 0x30, 0xba, 0x07, 0x8b, 0x04, 0xc4, 0x97, 0xbc, 0x32, 0xe7, 0x2c, 0x69,
+ 0x58, 0xd4, 0x44, 0x7a, 0x5b, 0x8e, 0xf4, 0xc2, 0x5f, 0x73, 0xb0, 0x42, 0xe1, 0xb1, 0xa0, 0xfe,
+ 0x8e, 0xaa, 0xf0, 0x78, 0x55, 0x34, 0x70, 0x7d, 0x46, 0x32, 0x7c, 0x55, 0xe8, 0x40, 0x11, 0x56,
+ 0x83, 0xf3, 0x31, 0x48, 0xe0, 0xa7, 0x29, 0x58, 0xb6, 0x62, 0x39, 0xbb, 0xe3, 0xb2, 0xc3, 0xed,
+ 0x09, 0x8e, 0x3e, 0xb0, 0x99, 0x99, 0xd0, 0x66, 0x1e, 0x04, 0x13, 0xee, 0x37, 0xbd, 0xd1, 0x68,
+ 0x70, 0x05, 0xaf, 0x6a, 0x2f, 0x7f, 0xcc, 0xc1, 0x4a, 0x60, 0x3e, 0x66, 0x60, 0x1f, 0x05, 0x33,
+ 0x88, 0x9b, 0x31, 0xf2, 0xbd, 0x54, 0x0e, 0xf1, 0x9e, 0x1d, 0xbb, 0x4f, 0x66, 0xc7, 0xff, 0x98,
+ 0x82, 0xeb, 0xee, 0x2d, 0x48, 0xde, 0x16, 0x74, 0x27, 0x80, 0xc0, 0x2e, 0x56, 0xc2, 0xff, 0x6e,
+ 0xd0, 0x43, 0xdf, 0x0f, 0x5f, 0xcc, 0x11, 0x22, 0x25, 0x79, 0xea, 0x48, 0xe4, 0x38, 0x33, 0x29,
+ 0x72, 0x7c, 0x21, 0x0d, 0xf8, 0x35, 0x2f, 0x28, 0xee, 0x17, 0x9f, 0x69, 0xc2, 0x98, 0xd5, 0xa5,
+ 0xf7, 0xe1, 0x2a, 0x49, 0x17, 0x9c, 0xd7, 0x33, 0x76, 0xc1, 0x9e, 0xfa, 0xd0, 0x19, 0x71, 0xc5,
+ 0xea, 0x76, 0xde, 0x83, 0xb0, 0x8a, 0x4a, 0x57, 0xf8, 0x2a, 0x03, 0xab, 0x56, 0x3a, 0xd1, 0x32,
+ 0xe5, 0xd3, 0x49, 0x6a, 0x0d, 0xbf, 0x14, 0x86, 0x6e, 0x53, 0xfe, 0x63, 0x89, 0xe6, 0x3a, 0x0e,
+ 0x62, 0x8b, 0x4a, 0xb0, 0x64, 0x98, 0xf2, 0x29, 0x71, 0x07, 0xb2, 0x7e, 0x8a, 0x4d, 0x69, 0x20,
+ 0x9b, 0xcf, 0x99, 0xad, 0x2f, 0xb2, 0xae, 0x36, 0xe9, 0x39, 0x94, 0xcd, 0xe7, 0x97, 0x74, 0x90,
+ 0xe8, 0x3b, 0x41, 0xa7, 0xf0, 0xd6, 0x88, 0xb5, 0x24, 0xe8, 0xd6, 0xf7, 0x62, 0xe0, 0xfd, 0x77,
+ 0x46, 0xb0, 0x1c, 0x0d, 0xeb, 0x5f, 0x1c, 0xce, 0xfe, 0x9a, 0x2b, 0x03, 0xd7, 0xe0, 0x6a, 0x68,
+ 0xf1, 0xec, 0x0a, 0x39, 0x85, 0xa2, 0xd5, 0x75, 0xa4, 0x1a, 0x13, 0xaa, 0x63, 0x8c, 0xc6, 0xa4,
+ 0x62, 0x34, 0x46, 0x58, 0x83, 0x6b, 0x11, 0x13, 0x31, 0x29, 0xfe, 0x22, 0x4b, 0xc5, 0x98, 0xbc,
+ 0x48, 0xf5, 0x69, 0x9c, 0x55, 0xbc, 0xeb, 0x3d, 0xf6, 0xc8, 0x7a, 0xce, 0xab, 0xb0, 0x8b, 0x1b,
+ 0x90, 0xf7, 0xd2, 0xb1, 0x6b, 0xd0, 0x1c, 0x61, 0x38, 0xd9, 0x0b, 0xd5, 0xce, 0x72, 0x81, 0xda,
+ 0x59, 0xdd, 0x35, 0xaa, 0x69, 0x7f, 0x2c, 0x1c, 0xbb, 0x15, 0x09, 0x66, 0xf5, 0x2c, 0x64, 0x56,
+ 0x33, 0xfe, 0x82, 0x5c, 0x2c, 0xd3, 0x5f, 0x00, 0xc3, 0x62, 0x4a, 0x1d, 0x59, 0x29, 0x13, 0x9e,
+ 0x01, 0x4f, 0x35, 0x7e, 0xf2, 0xda, 0x55, 0x40, 0x8d, 0x52, 0x41, 0x35, 0x12, 0xae, 0xc3, 0x5a,
+ 0x24, 0x6f, 0x36, 0xf5, 0xef, 0x70, 0x54, 0x30, 0x07, 0x14, 0x6b, 0x99, 0xb2, 0x69, 0x8c, 0x3b,
+ 0x35, 0xeb, 0xf4, 0x4e, 0x4d, 0x9b, 0x88, 0x06, 0x4f, 0x68, 0x12, 0xc2, 0xef, 0x71, 0x74, 0x1f,
+ 0x82, 0xb2, 0xb0, 0xdb, 0xf6, 0x4d, 0xc8, 0x0e, 0x09, 0xee, 0x4f, 0xa3, 0xae, 0x25, 0xbf, 0x11,
+ 0x1c, 0x59, 0x5d, 0x22, 0xa5, 0xb8, 0x34, 0x24, 0x55, 0xf8, 0x29, 0x07, 0x79, 0x0f, 0x7f, 0xb4,
+ 0x0e, 0xb3, 0x0e, 0x54, 0x64, 0x27, 0x48, 0x4e, 0x83, 0x75, 0xfc, 0xa6, 0x66, 0xca, 0x3d, 0xf6,
+ 0x26, 0x85, 0x7e, 0x58, 0x39, 0xed, 0xd0, 0xc0, 0x34, 0x1c, 0x4e, 0x8b, 0xe4, 0x6f, 0x74, 0x07,
+ 0x32, 0x43, 0x55, 0x31, 0x89, 0xd9, 0x2f, 0x04, 0xed, 0x99, 0x4c, 0x55, 0x3a, 0x52, 0x15, 0x53,
+ 0x24, 0x54, 0xc2, 0x6d, 0xc8, 0x58, 0x5f, 0x7e, 0xc8, 0x62, 0x16, 0xb2, 0x95, 0xa7, 0xed, 0x6a,
+ 0xab, 0xc0, 0x21, 0x80, 0x5c, 0x8d, 0x26, 0xf8, 0x29, 0xa1, 0x6e, 0xbf, 0x4b, 0x75, 0x16, 0x61,
+ 0xb9, 0x00, 0xf9, 0x58, 0xd5, 0xf4, 0xbe, 0xdc, 0x23, 0x32, 0xcf, 0x88, 0xce, 0x77, 0x7c, 0x39,
+ 0x85, 0x82, 0x8f, 0xeb, 0xce, 0x89, 0x44, 0x01, 0x4c, 0x9f, 0x51, 0xdd, 0x8a, 0x83, 0x96, 0xca,
+ 0x91, 0xd0, 0xd2, 0x75, 0xdf, 0x2d, 0x3b, 0x02, 0x54, 0xfa, 0xdb, 0x14, 0xac, 0x44, 0xd2, 0xa1,
+ 0xf7, 0xbc, 0x70, 0xd2, 0x56, 0x22, 0x4f, 0x2f, 0x90, 0xf4, 0x15, 0x47, 0x81, 0xa4, 0x1d, 0x1f,
+ 0x90, 0xf4, 0xfa, 0xc8, 0xf1, 0x5e, 0x08, 0xe9, 0xc7, 0x5c, 0x0c, 0x84, 0xd4, 0x6a, 0x97, 0xf7,
+ 0xab, 0xd2, 0x51, 0x83, 0xfe, 0xeb, 0x40, 0x48, 0xcb, 0x50, 0x70, 0x81, 0x15, 0xa9, 0xd5, 0x2e,
+ 0x93, 0x47, 0xc6, 0x21, 0xf8, 0x26, 0x1d, 0x09, 0xce, 0x64, 0x46, 0xe3, 0x30, 0x59, 0x4a, 0xb2,
+ 0x0a, 0x88, 0x8d, 0x7e, 0xdc, 0x3c, 0x6a, 0xb4, 0x25, 0xf2, 0x84, 0xb9, 0x90, 0x73, 0xf0, 0x99,
+ 0x65, 0x40, 0xec, 0xb4, 0xbc, 0x2f, 0xf1, 0xff, 0x84, 0x83, 0x25, 0x5f, 0x33, 0x3b, 0x3c, 0x4f,
+ 0x51, 0x9c, 0xf3, 0x15, 0xc5, 0xef, 0xc1, 0xb2, 0x95, 0x31, 0x52, 0x4b, 0x31, 0xa4, 0x01, 0xd6,
+ 0x09, 0x18, 0xce, 0x74, 0x7e, 0xb1, 0x2f, 0xbf, 0x60, 0x05, 0x83, 0x43, 0xac, 0x5b, 0x8c, 0x2f,
+ 0x01, 0x12, 0x16, 0xbe, 0x4c, 0xd3, 0xb8, 0x64, 0xe2, 0xbc, 0x66, 0xa4, 0x8f, 0x0a, 0x27, 0x3e,
+ 0xe9, 0x09, 0x12, 0x9f, 0x18, 0x0f, 0x97, 0x99, 0x28, 0x18, 0x9e, 0xfc, 0x4e, 0x6f, 0xb8, 0xf7,
+ 0x36, 0x8d, 0x5c, 0xef, 0x78, 0xf5, 0x77, 0x64, 0xa6, 0x95, 0xfb, 0xb2, 0xc2, 0xfd, 0xe8, 0xb2,
+ 0xf2, 0xe4, 0x32, 0x8d, 0xc7, 0x2e, 0x90, 0x1f, 0x09, 0x77, 0xe0, 0x16, 0x79, 0x56, 0x39, 0x0a,
+ 0xd0, 0xa6, 0x2e, 0xe9, 0x57, 0xe1, 0xf5, 0x51, 0xd4, 0x6c, 0xfa, 0x7a, 0xa4, 0xff, 0x71, 0x6a,
+ 0x5b, 0x01, 0x2e, 0x23, 0x5c, 0x11, 0x9d, 0xfc, 0xb7, 0x53, 0xb0, 0x39, 0x6a, 0x1c, 0xfa, 0xd8,
+ 0xeb, 0x9a, 0xee, 0x8c, 0x3b, 0x9d, 0xd7, 0x4b, 0xfd, 0x01, 0xf3, 0x52, 0x55, 0x9f, 0x97, 0x7a,
+ 0x67, 0x12, 0x56, 0x5e, 0x87, 0x55, 0x8d, 0xf2, 0x57, 0x6f, 0xc3, 0x1b, 0x7e, 0x58, 0xda, 0xe3,
+ 0xa3, 0xe8, 0xaf, 0x1f, 0x1c, 0x9c, 0x9a, 0x23, 0x0e, 0x66, 0xc7, 0x87, 0xf6, 0xfe, 0x7e, 0x1a,
+ 0x36, 0xbd, 0x0f, 0x94, 0xf7, 0xbd, 0x68, 0x5a, 0xd2, 0xaf, 0x05, 0x6e, 0xc3, 0x62, 0x10, 0x29,
+ 0xb2, 0x1f, 0xe4, 0x5e, 0xf1, 0x43, 0x45, 0x46, 0xd2, 0x03, 0x9c, 0x11, 0x53, 0x27, 0xe7, 0x7f,
+ 0x61, 0x14, 0xf8, 0x9b, 0x63, 0x33, 0xfe, 0xff, 0x09, 0x08, 0x53, 0xf5, 0xec, 0xc1, 0x56, 0x82,
+ 0xfc, 0xcc, 0x2c, 0x2a, 0xb0, 0xe0, 0x07, 0x46, 0x99, 0xa6, 0x06, 0x5e, 0xa1, 0xfa, 0x07, 0xcf,
+ 0xfb, 0xd0, 0x52, 0x3a, 0xdb, 0x3f, 0x73, 0xf6, 0x83, 0x7d, 0x1f, 0xad, 0x75, 0xc2, 0x61, 0xe4,
+ 0x95, 0x2e, 0x22, 0x08, 0xba, 0xa2, 0x12, 0xcc, 0xda, 0x54, 0x46, 0xf0, 0x09, 0xa8, 0x33, 0xb9,
+ 0x4b, 0x12, 0x06, 0x8e, 0xd3, 0x17, 0x04, 0x8e, 0x33, 0x41, 0xe0, 0x98, 0xae, 0xed, 0x87, 0x29,
+ 0xd8, 0xf4, 0xbe, 0x95, 0x8c, 0x54, 0xef, 0x49, 0x16, 0xba, 0x05, 0x73, 0x1e, 0x2a, 0x5b, 0xe3,
+ 0xf3, 0x2e, 0xee, 0x99, 0xa4, 0xed, 0xa3, 0x24, 0x79, 0x45, 0x20, 0x28, 0xdd, 0x8a, 0x6d, 0xd8,
+ 0x4a, 0x98, 0x9f, 0x2a, 0x15, 0xa5, 0xfc, 0x41, 0x8a, 0xfc, 0xb6, 0xed, 0xff, 0x6e, 0xc7, 0xe2,
+ 0x81, 0xc7, 0x44, 0x31, 0x5e, 0xe9, 0x76, 0x29, 0xb0, 0x11, 0x37, 0xf9, 0x25, 0x1b, 0xe0, 0xfd,
+ 0xff, 0xe6, 0x60, 0xa6, 0xd6, 0xc5, 0xaa, 0x49, 0x83, 0x82, 0x79, 0xdf, 0xaf, 0x1c, 0xd1, 0x7a,
+ 0xcc, 0x8f, 0x1f, 0xc9, 0x16, 0xf0, 0xd7, 0x13, 0x7f, 0x1a, 0x29, 0x4c, 0xa1, 0x13, 0xcf, 0x2f,
+ 0x34, 0x7d, 0xcf, 0x05, 0x5e, 0x0b, 0x8d, 0x8c, 0xb8, 0xab, 0xf9, 0x5b, 0x23, 0xa8, 0x9c, 0x79,
+ 0xde, 0x87, 0x2c, 0xf9, 0xb1, 0x1a, 0x5a, 0x76, 0x7e, 0x30, 0xe7, 0xf9, 0x2d, 0x1b, 0xbf, 0x12,
+ 0x68, 0xb5, 0xc7, 0xdd, 0xff, 0xfb, 0x59, 0x00, 0xf7, 0x0e, 0x44, 0x8f, 0x60, 0xce, 0xeb, 0xfa,
+ 0xd0, 0x5a, 0xc2, 0xaf, 0xb5, 0xf8, 0xf5, 0xe8, 0x4e, 0x47, 0xa6, 0x47, 0x30, 0xe7, 0x55, 0x79,
+ 0x97, 0x59, 0xc4, 0x63, 0x6d, 0x97, 0x59, 0xe4, 0xdb, 0xea, 0x29, 0xd4, 0x83, 0xab, 0x31, 0x4f,
+ 0x65, 0xd1, 0xeb, 0xe3, 0x3d, 0x28, 0xe6, 0xdf, 0x18, 0xf3, 0xcd, 0xad, 0x30, 0x85, 0x74, 0xb8,
+ 0x16, 0xfb, 0x42, 0x14, 0x6d, 0x8f, 0xfb, 0x7e, 0x95, 0x7f, 0x73, 0x0c, 0x4a, 0x67, 0xce, 0x21,
+ 0xf0, 0xf1, 0xcf, 0xd2, 0xd0, 0x9b, 0x63, 0xbf, 0x97, 0xe4, 0x6f, 0x8f, 0xff, 0xca, 0x4d, 0x98,
+ 0x42, 0x07, 0x90, 0xf7, 0xbc, 0x4f, 0x42, 0x7c, 0xe4, 0xa3, 0x25, 0xca, 0x78, 0x2d, 0xe1, 0x41,
+ 0x13, 0xe5, 0xe4, 0x79, 0x32, 0xe2, 0x72, 0x0a, 0x3f, 0x7e, 0x71, 0x39, 0x45, 0xbc, 0x31, 0x09,
+ 0x6e, 0x7f, 0x20, 0x30, 0x8d, 0xda, 0xfe, 0xe8, 0x48, 0x37, 0x6a, 0xfb, 0x63, 0xa2, 0x5c, 0x61,
+ 0x0a, 0x7d, 0x17, 0x16, 0xfc, 0xb5, 0x60, 0x74, 0x3d, 0xb1, 0xa6, 0xcd, 0x6f, 0xc4, 0x75, 0x7b,
+ 0x59, 0xfa, 0x2b, 0x89, 0x2e, 0xcb, 0xc8, 0x8a, 0xa6, 0xcb, 0x32, 0xa6, 0x00, 0x39, 0x65, 0xf9,
+ 0x27, 0x5f, 0x7d, 0xcc, 0xf5, 0x4f, 0x51, 0x65, 0x3d, 0xd7, 0x3f, 0x45, 0x16, 0xd5, 0x84, 0x29,
+ 0xa4, 0xc0, 0x6a, 0x74, 0x79, 0x06, 0xdd, 0x1a, 0xab, 0xfa, 0xc4, 0xbf, 0x3e, 0x8a, 0xcc, 0x99,
+ 0xaa, 0x03, 0x4b, 0x11, 0xcf, 0xc7, 0x90, 0x90, 0xf8, 0xb6, 0x8c, 0x4e, 0x72, 0x73, 0x8c, 0xf7,
+ 0x67, 0x02, 0x71, 0xe6, 0xff, 0x95, 0x86, 0x2b, 0x81, 0xc0, 0x1e, 0xfd, 0x06, 0x07, 0x1b, 0xc9,
+ 0xc9, 0x0e, 0xba, 0x1b, 0x93, 0x14, 0xc4, 0x28, 0x56, 0x69, 0x5c, 0x72, 0x8f, 0x71, 0x5f, 0x8b,
+ 0x8d, 0x29, 0xd1, 0xf6, 0xb8, 0x61, 0xb3, 0x47, 0xa3, 0x47, 0x05, 0xa8, 0x64, 0x3b, 0xac, 0x69,
+ 0x63, 0xa3, 0x0e, 0xb4, 0x3d, 0x6e, 0x60, 0xe4, 0x4e, 0x3b, 0x32, 0x84, 0xa1, 0xd3, 0xf6, 0x60,
+ 0x35, 0xfa, 0xf6, 0x46, 0xb7, 0xc6, 0x0a, 0x2d, 0x5c, 0xad, 0x4a, 0x0e, 0x02, 0xc8, 0x6c, 0x24,
+ 0xad, 0xba, 0xff, 0x2f, 0x59, 0xc8, 0x10, 0xa0, 0xa4, 0x0d, 0x57, 0x02, 0xc5, 0x17, 0xb4, 0x91,
+ 0x5c, 0x92, 0xe2, 0x6f, 0xc4, 0xf6, 0x3b, 0xe7, 0xf7, 0x0c, 0x16, 0x43, 0xe5, 0x14, 0xb4, 0xe9,
+ 0x1d, 0x17, 0x55, 0xd2, 0xe1, 0xb7, 0x12, 0x28, 0x82, 0xbc, 0xfd, 0x97, 0xda, 0xe6, 0x28, 0xbc,
+ 0xdf, 0xcf, 0x3b, 0xee, 0x22, 0xfb, 0x8c, 0xe2, 0x52, 0xc1, 0x2b, 0x4c, 0xf0, 0xcb, 0x15, 0x79,
+ 0x79, 0xdd, 0x4c, 0xa4, 0x71, 0x66, 0xf8, 0xd4, 0x01, 0xc4, 0x3c, 0x70, 0x33, 0xf2, 0x09, 0x17,
+ 0x09, 0x8b, 0xf3, 0x42, 0x12, 0x89, 0xc3, 0xfe, 0x13, 0x28, 0x04, 0x91, 0x11, 0x74, 0x63, 0x04,
+ 0x50, 0xc3, 0x6f, 0xc6, 0x13, 0x04, 0x77, 0x26, 0xe8, 0x09, 0x82, 0x52, 0x45, 0x99, 0xff, 0xcd,
+ 0x44, 0x1a, 0xef, 0x7d, 0xe8, 0xc1, 0x04, 0xdd, 0xfb, 0x30, 0x8c, 0x1f, 0xba, 0xf7, 0x61, 0x04,
+ 0x88, 0x28, 0x4c, 0xed, 0x3c, 0x00, 0x90, 0x7b, 0x83, 0xe7, 0xb2, 0x84, 0xd5, 0x61, 0x1f, 0xad,
+ 0x87, 0xd2, 0xb4, 0xaa, 0x3a, 0xec, 0x37, 0x07, 0x56, 0x76, 0x66, 0x14, 0xff, 0x6c, 0x86, 0xe4,
+ 0x62, 0xb3, 0x64, 0x80, 0xd5, 0xb1, 0x53, 0x87, 0x82, 0x3b, 0x5a, 0x22, 0x81, 0x36, 0xda, 0x8a,
+ 0xe4, 0x41, 0x5e, 0x4b, 0x06, 0x18, 0x2d, 0x38, 0x8c, 0x48, 0xef, 0xce, 0x47, 0x00, 0x1d, 0x43,
+ 0x91, 0x68, 0xa4, 0x8f, 0xae, 0x87, 0xf8, 0x3c, 0x54, 0x70, 0xaf, 0x6b, 0xf3, 0xf8, 0x53, 0x26,
+ 0x4c, 0xc7, 0x50, 0x68, 0x3e, 0xb0, 0xf3, 0x6d, 0xc8, 0x53, 0x61, 0x4e, 0x2c, 0xba, 0x51, 0xe3,
+ 0x99, 0x0c, 0x74, 0xf5, 0xa4, 0x67, 0xa7, 0x0a, 0xf3, 0x94, 0x01, 0x83, 0xd8, 0xd1, 0x8d, 0x10,
+ 0x8b, 0xc7, 0xb4, 0x27, 0xc0, 0x64, 0x8e, 0x0c, 0x63, 0x7d, 0x3b, 0x15, 0x98, 0xb3, 0xd9, 0x98,
+ 0xcf, 0xb5, 0x2e, 0xda, 0x88, 0xe0, 0x62, 0x75, 0x04, 0x98, 0xe4, 0x19, 0x13, 0xab, 0xcb, 0x15,
+ 0xc5, 0xfe, 0x3f, 0x3e, 0xc2, 0xa2, 0x30, 0x54, 0x29, 0x52, 0x14, 0xd6, 0x57, 0xc9, 0x3e, 0x4b,
+ 0x77, 0x0c, 0xe5, 0x38, 0x47, 0x06, 0x7d, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x79,
+ 0xd8, 0xd8, 0x90, 0x46, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -5957,6 +6699,186 @@ var _Controller_serviceDesc = grpc.ServiceDesc{
Metadata: "github.com/container-storage-interface/spec/csi.proto",
}
+// GroupControllerClient is the client API for GroupController service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type GroupControllerClient interface {
+ GroupControllerGetCapabilities(ctx context.Context, in *GroupControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*GroupControllerGetCapabilitiesResponse, error)
+ CreateVolumeGroupSnapshot(ctx context.Context, in *CreateVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*CreateVolumeGroupSnapshotResponse, error)
+ DeleteVolumeGroupSnapshot(ctx context.Context, in *DeleteVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*DeleteVolumeGroupSnapshotResponse, error)
+ GetVolumeGroupSnapshot(ctx context.Context, in *GetVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*GetVolumeGroupSnapshotResponse, error)
+}
+
+type groupControllerClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewGroupControllerClient(cc *grpc.ClientConn) GroupControllerClient {
+ return &groupControllerClient{cc}
+}
+
+func (c *groupControllerClient) GroupControllerGetCapabilities(ctx context.Context, in *GroupControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*GroupControllerGetCapabilitiesResponse, error) {
+ out := new(GroupControllerGetCapabilitiesResponse)
+ err := c.cc.Invoke(ctx, "/csi.v1.GroupController/GroupControllerGetCapabilities", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupControllerClient) CreateVolumeGroupSnapshot(ctx context.Context, in *CreateVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*CreateVolumeGroupSnapshotResponse, error) {
+ out := new(CreateVolumeGroupSnapshotResponse)
+ err := c.cc.Invoke(ctx, "/csi.v1.GroupController/CreateVolumeGroupSnapshot", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupControllerClient) DeleteVolumeGroupSnapshot(ctx context.Context, in *DeleteVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*DeleteVolumeGroupSnapshotResponse, error) {
+ out := new(DeleteVolumeGroupSnapshotResponse)
+ err := c.cc.Invoke(ctx, "/csi.v1.GroupController/DeleteVolumeGroupSnapshot", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupControllerClient) GetVolumeGroupSnapshot(ctx context.Context, in *GetVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*GetVolumeGroupSnapshotResponse, error) {
+ out := new(GetVolumeGroupSnapshotResponse)
+ err := c.cc.Invoke(ctx, "/csi.v1.GroupController/GetVolumeGroupSnapshot", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// GroupControllerServer is the server API for GroupController service.
+type GroupControllerServer interface {
+ GroupControllerGetCapabilities(context.Context, *GroupControllerGetCapabilitiesRequest) (*GroupControllerGetCapabilitiesResponse, error)
+ CreateVolumeGroupSnapshot(context.Context, *CreateVolumeGroupSnapshotRequest) (*CreateVolumeGroupSnapshotResponse, error)
+ DeleteVolumeGroupSnapshot(context.Context, *DeleteVolumeGroupSnapshotRequest) (*DeleteVolumeGroupSnapshotResponse, error)
+ GetVolumeGroupSnapshot(context.Context, *GetVolumeGroupSnapshotRequest) (*GetVolumeGroupSnapshotResponse, error)
+}
+
+// UnimplementedGroupControllerServer can be embedded to have forward compatible implementations.
+type UnimplementedGroupControllerServer struct {
+}
+
+func (*UnimplementedGroupControllerServer) GroupControllerGetCapabilities(ctx context.Context, req *GroupControllerGetCapabilitiesRequest) (*GroupControllerGetCapabilitiesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GroupControllerGetCapabilities not implemented")
+}
+func (*UnimplementedGroupControllerServer) CreateVolumeGroupSnapshot(ctx context.Context, req *CreateVolumeGroupSnapshotRequest) (*CreateVolumeGroupSnapshotResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateVolumeGroupSnapshot not implemented")
+}
+func (*UnimplementedGroupControllerServer) DeleteVolumeGroupSnapshot(ctx context.Context, req *DeleteVolumeGroupSnapshotRequest) (*DeleteVolumeGroupSnapshotResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteVolumeGroupSnapshot not implemented")
+}
+func (*UnimplementedGroupControllerServer) GetVolumeGroupSnapshot(ctx context.Context, req *GetVolumeGroupSnapshotRequest) (*GetVolumeGroupSnapshotResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetVolumeGroupSnapshot not implemented")
+}
+
+func RegisterGroupControllerServer(s *grpc.Server, srv GroupControllerServer) {
+ s.RegisterService(&_GroupController_serviceDesc, srv)
+}
+
+func _GroupController_GroupControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GroupControllerGetCapabilitiesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupControllerServer).GroupControllerGetCapabilities(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/csi.v1.GroupController/GroupControllerGetCapabilities",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupControllerServer).GroupControllerGetCapabilities(ctx, req.(*GroupControllerGetCapabilitiesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupController_CreateVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateVolumeGroupSnapshotRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupControllerServer).CreateVolumeGroupSnapshot(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/csi.v1.GroupController/CreateVolumeGroupSnapshot",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupControllerServer).CreateVolumeGroupSnapshot(ctx, req.(*CreateVolumeGroupSnapshotRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupController_DeleteVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteVolumeGroupSnapshotRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupControllerServer).DeleteVolumeGroupSnapshot(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/csi.v1.GroupController/DeleteVolumeGroupSnapshot",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupControllerServer).DeleteVolumeGroupSnapshot(ctx, req.(*DeleteVolumeGroupSnapshotRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupController_GetVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetVolumeGroupSnapshotRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupControllerServer).GetVolumeGroupSnapshot(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/csi.v1.GroupController/GetVolumeGroupSnapshot",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupControllerServer).GetVolumeGroupSnapshot(ctx, req.(*GetVolumeGroupSnapshotRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _GroupController_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "csi.v1.GroupController",
+ HandlerType: (*GroupControllerServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GroupControllerGetCapabilities",
+ Handler: _GroupController_GroupControllerGetCapabilities_Handler,
+ },
+ {
+ MethodName: "CreateVolumeGroupSnapshot",
+ Handler: _GroupController_CreateVolumeGroupSnapshot_Handler,
+ },
+ {
+ MethodName: "DeleteVolumeGroupSnapshot",
+ Handler: _GroupController_DeleteVolumeGroupSnapshot_Handler,
+ },
+ {
+ MethodName: "GetVolumeGroupSnapshot",
+ Handler: _GroupController_GetVolumeGroupSnapshot_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "github.com/container-storage-interface/spec/csi.proto",
+}
+
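As a rough illustration of how the new GroupController client above is meant to be consumed, here is a minimal sketch against the generated API; the import path, the package name, and the already-dialed `conn` argument are assumptions for illustration, not part of this patch.

```go
// Minimal sketch: querying the new GroupController service over an existing
// gRPC connection. The csi import path is the conventional one for the spec
// bindings and is an assumption here.
package example

import (
	"context"
	"log"
	"time"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

func probeGroupController(conn *grpc.ClientConn) {
	client := csi.NewGroupControllerClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Ask the plugin which group-snapshot capabilities it advertises.
	resp, err := client.GroupControllerGetCapabilities(ctx, &csi.GroupControllerGetCapabilitiesRequest{})
	if err != nil {
		log.Fatalf("GroupControllerGetCapabilities failed: %v", err)
	}
	log.Printf("group controller capabilities: %+v", resp)
}
```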
// NodeClient is the client API for Node service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 74a378157a..352018e703 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,10 +1,26 @@
# Change history of go-restful
-## [v3.9.0] - 20221-07-21
+## [v3.10.2] - 2023-03-09
+
+- introduced MergePathStrategy to allow reverting the path-concatenation behaviour to that of 3.9.0;
+  see the comment in the README on how to customize this behaviour.
+
+## [v3.10.1] - 2022-11-19
+
+- fix broken 3.10.0 by using path package for joining paths
+
+## [v3.10.0] - 2022-10-11 - BROKEN
+
+- changed tokenizer to match std route match behavior; do not trimright the path (#511)
+- Add MIME_ZIP (#512)
+- Add MIME_ZIP and HEADER_ContentDisposition (#513)
+- Changed how to get query parameter issue #510
+
+## [v3.9.0] - 2022-07-21
- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)
-## [v3.8.0] - 20221-06-06
+## [v3.8.0] - 2022-06-06
- use exact matching of allowed domain entries, issue #489 (#493)
- this changes fixes [security] Authorization Bypass Through User-Controlled Key
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index 0625359dc4..85da90128e 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -96,6 +96,10 @@ There are several hooks to customize the behavior of the go-restful package.
- Compression
- Encoders for other serializers
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
+- Use the variable `MergePathStrategy` to change how a Route path is composed from a root path and a local route path (a short sketch follows this list)
+  - versions >= 3.10.1 have the value set to `PathJoinStrategy`, which fixes a reported [security issue](https://github.com/advisories/GHSA-r48q-9g5r-8q2h) but may cause your services to stop working correctly
+  - versions <= 3.9 had the older behaviour, which can be restored in newer versions by setting the value to `TrimSlashStrategy`
+  - you can set the value to a custom implementation (it must satisfy `MergePathStrategyFunc`)
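For illustration, a minimal sketch of the customization described above, using the exported variables added to `route_builder.go` later in this patch; the package name is a placeholder.

```go
// Minimal sketch: restore the pre-3.10 path concatenation behaviour at
// program start-up.
package example

import restful "github.com/emicklei/go-restful/v3"

func init() {
	// The default since v3.10 is PathJoinStrategy; TrimSlashStrategy restores
	// the <= v3.9 behaviour. Any compatible MergePathStrategyFunc can be
	// assigned here instead.
	restful.MergePathStrategy = restful.TrimSlashStrategy
}
```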
## Resources
diff --git a/vendor/github.com/emicklei/go-restful/v3/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go
index 203439c5e5..2328bde6c7 100644
--- a/vendor/github.com/emicklei/go-restful/v3/constants.go
+++ b/vendor/github.com/emicklei/go-restful/v3/constants.go
@@ -7,12 +7,14 @@ package restful
const (
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
+ MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
HEADER_Allow = "Allow"
HEADER_Accept = "Accept"
HEADER_Origin = "Origin"
HEADER_ContentType = "Content-Type"
+ HEADER_ContentDisposition = "Content-Disposition"
HEADER_LastModified = "Last-Modified"
HEADER_AcceptEncoding = "Accept-Encoding"
HEADER_ContentEncoding = "Content-Encoding"
diff --git a/vendor/github.com/emicklei/go-restful/v3/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go
index 5725a07595..0020095e86 100644
--- a/vendor/github.com/emicklei/go-restful/v3/request.go
+++ b/vendor/github.com/emicklei/go-restful/v3/request.go
@@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request {
// a "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
-// restful.DefaultRequestContentType(restful.MIME_JSON)
+//
+// restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}
@@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string {
// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
- return r.Request.FormValue(name)
+ return r.Request.URL.Query().Get(name)
}
// QueryParameters returns the all the query parameters values by name
diff --git a/vendor/github.com/emicklei/go-restful/v3/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go
index 8f0b56aa2d..a41a92cc2c 100644
--- a/vendor/github.com/emicklei/go-restful/v3/response.go
+++ b/vendor/github.com/emicklei/go-restful/v3/response.go
@@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
if DefaultResponseMimeType == MIME_XML {
return entityAccessRegistry.accessorAt(MIME_XML)
}
+ if DefaultResponseMimeType == MIME_ZIP {
+ return entityAccessRegistry.accessorAt(MIME_ZIP)
+ }
// Fallback to whatever the route says it can produce.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
for _, each := range r.routeProduces {
diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go
index 193f4a6b01..ea05b3da88 100644
--- a/vendor/github.com/emicklei/go-restful/v3/route.go
+++ b/vendor/github.com/emicklei/go-restful/v3/route.go
@@ -164,7 +164,7 @@ func tokenizePath(path string) []string {
if "/" == path {
return nil
}
- return strings.Split(strings.Trim(path, "/"), "/")
+ return strings.Split(strings.TrimLeft(path, "/"), "/")
}
// for debugging
@@ -176,3 +176,5 @@ func (r *Route) String() string {
func (r *Route) EnableContentEncoding(enabled bool) {
r.contentEncodingEnabled = &enabled
}
+
+var TrimRightSlashEnabled = false
diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go
index 23641b6dd5..827f471de0 100644
--- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go
+++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go
@@ -7,6 +7,7 @@ package restful
import (
"fmt"
"os"
+ "path"
"reflect"
"runtime"
"strings"
@@ -46,11 +47,12 @@ type RouteBuilder struct {
// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
-// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
-// func Returns500(b *RouteBuilder) {
-// b.Returns(500, "Internal Server Error", restful.ServiceError{})
-// }
+// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
+//
+// func Returns500(b *RouteBuilder) {
+// b.Returns(500, "Internal Server Error", restful.ServiceError{})
+// }
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
for _, each := range oneArgBlocks {
each(b)
@@ -351,8 +353,28 @@ func (b *RouteBuilder) Build() Route {
return route
}
-func concatPath(path1, path2 string) string {
- return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
+type MergePathStrategyFunc func(rootPath, routePath string) string
+
+var (
+ // behavior >= 3.10
+ PathJoinStrategy = func(rootPath, routePath string) string {
+ return path.Join(rootPath, routePath)
+ }
+
+ // behavior <= 3.9
+ TrimSlashStrategy = func(rootPath, routePath string) string {
+ return strings.TrimRight(rootPath, "/") + "/" + strings.TrimLeft(routePath, "/")
+ }
+
+ // MergePathStrategy is the active strategy for merging a Route path when building the routing of all WebServices.
+ // The value is set to PathJoinStrategy
+ // PathJoinStrategy is a strategy that is more strict [Security - PRISMA-2022-0227]
+ MergePathStrategy = PathJoinStrategy
+)
+
+// merge two paths using the current (package global) merge path strategy.
+func concatPath(rootPath, routePath string) string {
+ return MergePathStrategy(rootPath, routePath)
}
var anonymousFuncCount int32
diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
index fb376fce2d..f0610cf1e5 100644
--- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
+++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
@@ -26,11 +26,16 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
// - FlagLowercaseHost
// - FlagRemoveDefaultPort
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
+//
+// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment.
func NormalizeURL(u *url.URL) {
lowercaseScheme(u)
lowercaseHost(u)
removeDefaultPort(u)
removeDuplicateSlashes(u)
+
+ u.RawPath = ""
+ u.RawFragment = ""
}
func lowercaseScheme(u *url.URL) {
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
index f78ab684a0..d971fbe34b 100644
--- a/vendor/github.com/go-openapi/swag/util.go
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -341,12 +341,21 @@ type zeroable interface {
// IsZero returns true when the value passed into the function is a zero value.
// This allows for safer checking of interface values.
func IsZero(data interface{}) bool {
+ v := reflect.ValueOf(data)
+ // check for nil data
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ if v.IsNil() {
+ return true
+ }
+ }
+
// check for things that have an IsZero method instead
if vv, ok := data.(zeroable); ok {
return vv.IsZero()
}
+
// continue with slightly more complex reflection
- v := reflect.ValueOf(data)
switch v.Kind() {
case reflect.String:
return v.Len() == 0
@@ -358,14 +367,13 @@ func IsZero(data interface{}) bool {
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
case reflect.Struct, reflect.Array:
return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
case reflect.Invalid:
return true
+ default:
+ return false
}
- return false
}
// AddInitialisms add additional initialisms
diff --git a/vendor/github.com/golang-jwt/jwt/v5/.gitignore b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
new file mode 100644
index 0000000000..09573e0169
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+
diff --git a/vendor/github.com/jongio/azidext/go/azidext/LICENSE b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
similarity index 91%
rename from vendor/github.com/jongio/azidext/go/azidext/LICENSE
rename to vendor/github.com/golang-jwt/jwt/v5/LICENSE
index 21cbc94a68..35dbc25204 100644
--- a/vendor/github.com/jongio/azidext/go/azidext/LICENSE
+++ b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
@@ -1,7 +1,9 @@
-Copyright 2020 Jon Gallant
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
new file mode 100644
index 0000000000..6ad1c22bbe
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
@@ -0,0 +1,185 @@
+# Migration Guide (v5.0.0)
+
+Version `v5` contains a major rework of core functionalities in the `jwt-go`
+library. This includes support for several validation options as well as a
+re-design of the `Claims` interface. Lastly, we reworked how errors work under
+the hood, which should provide a better overall developer experience.
+
+Starting from [v5.0.0](https://github.com/golang-jwt/jwt/releases/tag/v5.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v5"
+
+For most users, changing the import path *should* suffice. However, since we
+intentionally changed and cleaned some of the public API, existing programs
+might need to be updated. The following sections describe significant changes
+and corresponding updates for existing programs.
+
+## Parsing and Validation Options
+
+Under the hood, a new `validator` struct takes care of validating the claims. A
+long awaited feature has been the option to fine-tune the validation of tokens.
+This is now possible with several `ParserOption` functions that can be appended
+to most `Parse` functions, such as `ParseWithClaims`. The most important options
+and changes are listed below, with a short usage sketch after the list:
+ * Added `WithLeeway` to support specifying the leeway that is allowed when
+ validating time-based claims, such as `exp` or `nbf`.
+ * Changed default behavior to not check the `iat` claim. Usage of this claim
+ is OPTIONAL according to the JWT RFC. The claim itself is also purely
+ informational according to the RFC, so a strict validation failure is not
+ recommended. If you want to check for sensible values in these claims,
+ please use the `WithIssuedAt` parser option.
+ * Added `WithAudience`, `WithSubject` and `WithIssuer` to support checking for
+ expected `aud`, `sub` and `iss`.
+ * Added `WithStrictDecoding` and `WithPaddingAllowed` options to allow
+ previously global settings to enable base64 strict encoding and the parsing
+ of base64 strings with padding. The latter is strictly speaking against the
+ standard, but unfortunately some of the major identity providers issue some
+ of these incorrect tokens. Both options are disabled by default.
+
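+As a quick, hedged sketch (not one of the guide's own examples), parsing with
+some of these options might look like the following, assuming `tokenString` and
+`secretKey` are supplied by the caller and the token is signed with HS256:
+
+```go
+token, err := jwt.ParseWithClaims(tokenString, jwt.MapClaims{},
+	func(t *jwt.Token) (interface{}, error) { return secretKey, nil },
+	jwt.WithValidMethods([]string{"HS256"}),
+	jwt.WithLeeway(5*time.Second),
+)
+if err != nil {
+	// Handle parsing/validation errors, e.g. jwt.ErrTokenExpired.
+}
+```
+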
+## Changes to the `Claims` interface
+
+### Complete Restructuring
+
+Previously, the claims interface was satisfied with an implementation of a
+`Valid() error` function. This had several issues:
+ * The different claim types (struct claims, map claims, etc.) then contained
+   similar (but not 100% identical) code for how this validation was done. This
+   led to a lot of (almost) duplicate code and was hard to maintain.
+ * It was not really semantically close to what a "claim" (or a set of claims)
+   really is, namely a list of defined key/value pairs with a certain
+ semantic meaning.
+
+Since all the validation functionality is now extracted into the validator, all
+`VerifyXXX` and `Valid` functions have been removed from the `Claims` interface.
+Instead, the interface now represents a list of getters to retrieve values with
+a specific meaning. This allows us to completely decouple the validation logic
+from the underlying storage representation of the claim, which could be a
+struct, a map or even something stored in a database.
+
+```go
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
+```
+
+### Supported Claim Types and Removal of `StandardClaims`
+
+The two standard claim types supported by this library, `MapClaims` and
+`RegisteredClaims` both implement the necessary functions of this interface. The
+old `StandardClaims` struct, which has already been deprecated in `v4` is now
+removed.
+
+Users using custom claims, in most cases, will not experience any changes in the
+behavior as long as they embed `RegisteredClaims`. If they created a new
+claim type from scratch, they now need to implement the proper getter
+functions.
+
+### Migrating Application Specific Logic of the old `Valid`
+
+Previously, users could override the `Valid` method in a custom claim, for
+example to extend the validation with application-specific claims. However, this
+was always very dangerous, since one could easily disable the standard
+validation and signature checking.
+
+In order to avoid that, while still supporting the use-case, a new
+`ClaimsValidator` interface has been introduced. This interface consists of the
+`Validate() error` function. If the validator sees that a `Claims` struct
+implements this interface, the errors returned by the `Validate` function will
+be *appended* to the errors of the regular standard validation. It is no longer
+possible to disable the standard validation (not even by accident).
+
+Usage examples can be found in [example_test.go](./example_test.go); they show
+how to build claims structs like the following.
+
+```go
+// MyCustomClaims includes all registered claims, plus Foo.
+type MyCustomClaims struct {
+ Foo string `json:"foo"`
+ jwt.RegisteredClaims
+}
+
+// Validate can be used to execute additional application-specific claims
+// validation.
+func (m MyCustomClaims) Validate() error {
+ if m.Foo != "bar" {
+ return errors.New("must be foobar")
+ }
+
+ return nil
+}
+```
+
+## Changes to the `Token` and `Parser` struct
+
+The previously global functions `DecodeSegment` and `EncodeSegment` were moved
+to the `Parser` and `Token` struct respectively. This will allow us in the
+future to configure the behavior of these two based on options supplied on the
+parser or the token (creation). This also removes two previously global
+variables and moves them to parser options `WithStrictDecoding` and
+`WithPaddingAllowed`.
+
+In order to do that, we had to adjust the way signing methods work. Previously
+they were given a base64 encoded signature in `Verify` and were expected to
+return a base64 encoded version of the signature in `Sign`, both as a `string`.
+However, this made it necessary to have `DecodeSegment` and `EncodeSegment`
+global and was a less than perfect design because we were repeating
+encoding/decoding steps for all signing methods. Now, `Sign` and `Verify`
+operate on a decoded signature as a `[]byte`, which feels more natural for a
+cryptographic operation anyway. Lastly, `Parse` and `SignedString` take care of
+the final encoding/decoding part.
+
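+For orientation, the v5 `SigningMethod` interface therefore has roughly the
+following shape (a sketch inferred from the shipped implementations such as
+`hmac.go` and `ecdsa.go`):
+
+```go
+type SigningMethod interface {
+	// Verify returns nil if sig is a valid, decoded signature of signingString.
+	Verify(signingString string, sig []byte, key interface{}) error
+	// Sign returns the decoded ([]byte) signature of signingString.
+	Sign(signingString string, key interface{}) ([]byte, error)
+	// Alg returns the "alg" value of this method, e.g. "HS256".
+	Alg() string
+}
+```
+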
+In addition to that, we also changed the `Signature` field on `Token` from a
+`string` to `[]byte` and this is also now populated with the decoded form. This
+is also more consistent, because the other parts of the JWT, mainly `Header` and
+`Claims` were already stored in decoded form in `Token`. Only the signature was
+stored in base64 encoded form, which was redundant with the information in the
+`Raw` field, which contains the complete token as base64.
+
+```go
+type Token struct {
+ Raw string // Raw contains the raw token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]interface{} // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form
+ Valid bool // Valid specifies if the token is valid
+}
+```
+
+Most (if not all) of these changes should not impact the normal usage of this
+library. Only users directly accessing the `Signature` field as well as
+developers of custom signing methods should be affected.
+
+# Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in
+this repo, as well as `github.com/dgrijalva/jwt-go`. For most users this should
+be a drop-in replacement; if you're having trouble migrating, please open an
+issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or
+`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually
+or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+# Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at
+https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md
new file mode 100644
index 0000000000..964598a317
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/README.md
@@ -0,0 +1,167 @@
+# jwt-go
+
+[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[![Go
+Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v5.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+[![Coverage Status](https://coveralls.io/repos/github/golang-jwt/jwt/badge.svg?branch=main)](https://coveralls.io/github/golang-jwt/jwt?branch=main)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness)
+implementation of [JSON Web
+Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
+this project adds Go module support, but maintains backwards compatibility with
+older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
+[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
+v5.0.0 introduces major improvements to the validation of tokens, but is not
+entirely backwards compatible.
+
+> After the original author of the library suggested migrating the maintenance
+> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
+> existing library into this repository. See
+> [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a
+> detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the
+crypto/elliptic package. The recommendation is to upgrade to at least Go 1.15. See issue
+[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
+detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is
+what you
+expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
+This library attempts to make it easy to do the right thing by requiring key
+types match the expected alg, but you should take the extra step to verify it in
+your usage. See the examples provided.
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release
+policy](https://golang.org/doc/devel/release#policy). So we will support a major
+version of Go until there are two newer major releases. We no longer support
+building jwt-go with unsupported Go versions, as these contain security
+vulnerabilities which will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web
+Tokens.
+
+In short, it's a signed JSON object that does something useful (for example,
+authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is
+made of three parts, separated by `.`'s. The first two parts are JSON objects
+that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648)
+encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for
+verifying the last part, the signature. For example, which signing algorithm
+was used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and
+contains the actual stuff you care about. Refer to [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about
+reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and
+signing of JWTs. Currently supported signing algorithms are HMAC SHA, RSA,
+RSA-PSS, ECDSA, and EdDSA (Ed25519), though hooks are present for adding your own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have
+ [Go](https://go.dev/doc/install) installed, then you can use the command
+ below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v5
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v5"
+```
+
+## Usage
+
+A detailed usage guide, including how to sign and verify tokens can be found on
+our [documentation website](https://golang-jwt.github.io/jwt/usage/create/).
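+
+As a quick, hedged taste (a sketch rather than the canonical example; see the
+guide above for details), creating and signing an HS256 token might look like
+this, where `mySigningKey` is a caller-supplied `[]byte` secret:
+
+```go
+token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+	"sub": "user-123",
+	"exp": jwt.NewNumericDate(time.Now().Add(time.Hour)),
+})
+signed, err := token.SignedString(mySigningKey)
+```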
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+for examples of usage:
+
+* [Simple example of parsing and validating a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-Parse-Hmac)
+* [Simple example of building and signing a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-New-Hmac)
+* [Directory of
+ Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#pkg-examples)
+
+## Compliance
+
+This library was last reviewed to comply with [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few
+notable differences:
+
+* In order to protect against accidental use of [Unsecured
+ JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using
+ `alg=none` will only be accepted if the constant
+ `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
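+As a hedged sketch of what that means in practice (assuming `unsecuredToken`
+holds a token with `alg=none`; you almost certainly should not do this outside
+of tests):
+
+```go
+// Unsecured tokens are rejected unless the magic constant is supplied as the key.
+token, err := jwt.Parse(unsecuredToken, func(t *jwt.Token) (interface{}, error) {
+	return jwt.UnsafeAllowNoneSignatureType, nil
+})
+```
+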
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are
+appreciated. The API should be considered stable. There should be very few
+backwards-incompatible changes outside of major version updates (and only with
+good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
+requests will land on `main`. Periodically, versions will be tagged from
+`main`. You can find all the releases on [the project releases
+page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:** A full list of breaking changes is available in
+`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating
+your code.
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing
+methods or key functions. Simply implement the `SigningMethod` interface and
+register a factory method using `RegisterSigningMethod` or provide a
+`jwt.Keyfunc`.
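+
+A hedged sketch of the registration side, where `MySigningMethod` is a
+hypothetical type implementing `jwt.SigningMethod`:
+
+```go
+// Register the method so tokens with alg "MY-ALG" can be signed and verified.
+jwt.RegisterSigningMethod("MY-ALG", func() jwt.SigningMethod {
+	return &MySigningMethod{}
+})
+```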
+
+A common use case would be integrating with third-party signature providers,
+such as key management services from various cloud providers or Hardware
+Security Modules (HSMs), or implementing additional standards.
+
+| Extension | Purpose | Repo |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
+| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by
+third parties and should not be considered a primary offering by any of the
+mentioned cloud providers.
+
+## More
+
+Go package documentation can be found [on
+pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v5). Additional
+documentation can be found on [our project
+page](https://golang-jwt.github.io/jwt/).
+
+The command line utility included in this project (cmd/jwt) provides a
+straightforward example of token creation and parsing as well as a useful tool
+for debugging your own integration. You'll also find several implementation
+examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version
+of the JWT logo, which is distributed under the terms of the [MIT
+License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
new file mode 100644
index 0000000000..b08402c342
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of February 2022 (and until this document is updated), the latest version `v4` is supported.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit and describe the steps to reproduce the security issue with code example(s).
+
+You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible, given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
new file mode 100644
index 0000000000..b5039e49c1
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
@@ -0,0 +1,137 @@
+# `jwt-go` Version History
+
+The following version history is kept for historic purposes. To retrieve the current changes of each version, please refer to the change-log of the specific release versions on https://github.com/golang-jwt/jwt/releases.
+
+## 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+## 3.2.2
+
+* Starting from this release, we are adopting the policy of supporting the 2 most recent versions of Go currently available. At the time of this release, these are Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date values. Thanks to @thaJeztah for making us aware of that and to @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+## 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed a type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160.
+
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+#### 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+#### 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+#### 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+#### 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+## 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+## 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+## 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v5/claims.go b/vendor/github.com/golang-jwt/jwt/v5/claims.go
new file mode 100644
index 0000000000..d50ff3dad8
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/claims.go
@@ -0,0 +1,16 @@
+package jwt
+
+// Claims represent any form of a JWT Claims Set according to
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4. In order to have a
+// common basis for validation, it is required that an implementation is able to
+// supply at least the claim names provided in
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 namely `exp`,
+// `iat`, `nbf`, `iss`, `sub` and `aud`.
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/doc.go b/vendor/github.com/golang-jwt/jwt/v5/doc.go
new file mode 100644
index 0000000000..a86dc1a3b3
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/doc.go
@@ -0,0 +1,4 @@
+// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
+//
+// See README.md for more info.
+package jwt
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
new file mode 100644
index 0000000000..4ccae2a857
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an *ecdsa.PublicKey
+func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error {
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return ErrInvalidKeyType
+ }
+
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+ return nil
+ }
+
+ return ErrECDSAVerification
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *ecdsa.PrivateKey
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return nil, ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return nil, ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays
+ // padded with zeros on the left to make sure the sizes work out.
+ // Output must be 2*keyBytes long.
+ out := make([]byte, 2*keyBytes)
+ r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ return out, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
new file mode 100644
index 0000000000..5700636d35
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM parses a PEM encoded ECDSA public key (in PKIX form or from an X.509 certificate)
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
new file mode 100644
index 0000000000..3db00e4a23
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
@@ -0,0 +1,80 @@
+package jwt
+
+import (
+ "errors"
+
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+)
+
+var (
+ ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// SigningMethodEd25519 implements the EdDSA family.
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+ SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+ SigningMethodEdDSA = &SigningMethodEd25519{}
+ RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+ return SigningMethodEdDSA
+ })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+ return "EdDSA"
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error {
+ var ed25519Key ed25519.PublicKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ if len(ed25519Key) != ed25519.PublicKeySize {
+ return ErrInvalidKey
+ }
+
+ // Verify the signature
+ if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+ return ErrEd25519Verification
+ }
+
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ed25519.PrivateKey (or any crypto.Signer whose public key is an ed25519.PublicKey)
+func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) {
+ var ed25519Key crypto.Signer
+ var ok bool
+
+ if ed25519Key, ok = key.(crypto.Signer); !ok {
+ return nil, ErrInvalidKeyType
+ }
+
+ if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+ return nil, ErrInvalidKey
+ }
+
+ // Sign the string and return the result. ed25519 performs a two-pass hash
+ // as part of its algorithm. Therefore, we need to pass a non-prehashed
+ // message into the Sign function, as indicated by crypto.Hash(0)
+ sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+ if err != nil {
+ return nil, err
+ }
+
+ return sig, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
new file mode 100644
index 0000000000..cdb5e68e87
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
@@ -0,0 +1,64 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+)
+
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+ return nil, ErrNotEdPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+ return nil, ErrNotEdPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go
new file mode 100644
index 0000000000..23bb616ddd
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go
@@ -0,0 +1,49 @@
+package jwt
+
+import (
+ "errors"
+ "strings"
+)
+
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+ ErrTokenMalformed = errors.New("token is malformed")
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+ ErrTokenRequiredClaimMissing = errors.New("token is missing required claim")
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+ ErrTokenInvalidSubject = errors.New("token has invalid subject")
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+ ErrTokenInvalidId = errors.New("token has invalid id")
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
+ ErrInvalidType = errors.New("invalid type for claim")
+)
+
+// joinedError is an error type that works similarly to what [errors.Join]
+// produces, with the exception that it has a nice error string; mainly its
+// error messages are concatenated using a comma, rather than a newline.
+type joinedError struct {
+ errs []error
+}
+
+func (je joinedError) Error() string {
+ msg := []string{}
+ for _, err := range je.errs {
+ msg = append(msg, err.Error())
+ }
+
+ return strings.Join(msg, ", ")
+}
+
+// joinErrors joins together multiple errors. Useful for scenarios where
+// multiple errors next to each other occur, e.g., in claims validation.
+func joinErrors(errs ...error) error {
+ return &joinedError{
+ errs: errs,
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
new file mode 100644
index 0000000000..a893d355e1
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
@@ -0,0 +1,47 @@
+//go:build go1.20
+// +build go1.20
+
+package jwt
+
+import (
+ "fmt"
+)
+
+// Unwrap implements the multiple error unwrapping for this error type, which is
+// possible in Go 1.20.
+func (je joinedError) Unwrap() []error {
+ return je.errs
+}
+
+// newError creates a new error message with a detailed error message. The
+// message will be prefixed with the contents of the supplied error type.
+// Additionally, more errors that provide more context can be supplied, which
+// will be appended to the message. This makes use of Go 1.20's possibility to
+// include more than one %w formatting directive in [fmt.Errorf].
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ var format string
+ var args []any
+ if message != "" {
+ format = "%w: %s"
+ args = []any{err, message}
+ } else {
+ format = "%w"
+ args = []any{err}
+ }
+
+ for _, e := range more {
+ format += ": %w"
+ args = append(args, e)
+ }
+
+ err = fmt.Errorf(format, args...)
+ return err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
new file mode 100644
index 0000000000..3afb04e648
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
@@ -0,0 +1,78 @@
+//go:build !go1.20
+// +build !go1.20
+
+package jwt
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Is implements checking for multiple errors using [errors.Is], since multiple
+// error unwrapping is not possible in versions less than Go 1.20.
+func (je joinedError) Is(err error) bool {
+ for _, e := range je.errs {
+ if errors.Is(e, err) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// wrappedErrors is a workaround for wrapping multiple errors in environments
+// where Go 1.20 is not available. It basically uses the already implemented
+// functionality of joinedError to handle multiple errors but supplies a
+// custom error message that is identical to the one we produce in Go 1.20 using
+// multiple %w directives.
+type wrappedErrors struct {
+ msg string
+ joinedError
+}
+
+// Error returns the stored error string
+func (we wrappedErrors) Error() string {
+ return we.msg
+}
+
+// newError creates a new error message with a detailed error message. The
+// message will be prefixed with the contents of the supplied error type.
+// Additionally, more errors that provide more context can be supplied, which
+// will be appended to the message. Since we cannot use Go 1.20's possibility
+// to include more than one %w formatting directive in [fmt.Errorf], we have to
+// emulate that.
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ // We cannot wrap multiple errors here with %w, so we have to be a little
+ // bit creative. Basically, we are using %s instead of %w to produce the
+ // same error message and then throw the result into a custom error struct.
+ var format string
+ var args []any
+ if message != "" {
+ format = "%s: %s"
+ args = []any{err, message}
+ } else {
+ format = "%s"
+ args = []any{err}
+ }
+ errs := []error{err}
+
+ for _, e := range more {
+ format += ": %s"
+ args = append(args, e)
+ errs = append(errs, e)
+ }
+
+ err = &wrappedErrors{
+ msg: fmt.Sprintf(format, args...),
+ joinedError: joinedError{errs: errs},
+ }
+ return err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
new file mode 100644
index 0000000000..91b688ba9f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
@@ -0,0 +1,104 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod. Returns nil if
+// the signature is valid. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types.
+func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/.
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return hasher.Sum(nil), nil
+ }
+
+ return nil, ErrInvalidKeyType
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
new file mode 100644
index 0000000000..b2b51a1f80
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
@@ -0,0 +1,109 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// MapClaims is a claims type that uses map[string]interface{} for JSON
+// decoding. This is the default claims type if you don't supply one.
+type MapClaims map[string]interface{}
+
+// GetExpirationTime implements the Claims interface.
+func (m MapClaims) GetExpirationTime() (*NumericDate, error) {
+ return m.parseNumericDate("exp")
+}
+
+// GetNotBefore implements the Claims interface.
+func (m MapClaims) GetNotBefore() (*NumericDate, error) {
+ return m.parseNumericDate("nbf")
+}
+
+// GetIssuedAt implements the Claims interface.
+func (m MapClaims) GetIssuedAt() (*NumericDate, error) {
+ return m.parseNumericDate("iat")
+}
+
+// GetAudience implements the Claims interface.
+func (m MapClaims) GetAudience() (ClaimStrings, error) {
+ return m.parseClaimsString("aud")
+}
+
+// GetIssuer implements the Claims interface.
+func (m MapClaims) GetIssuer() (string, error) {
+ return m.parseString("iss")
+}
+
+// GetSubject implements the Claims interface.
+func (m MapClaims) GetSubject() (string, error) {
+ return m.parseString("sub")
+}
+
+// parseNumericDate tries to parse a key in the map claims type as a numeric
+// date. This will succeed if the underlying type is either a [float64] or a
+// [json.Number]. If the key is absent, nil is returned; for any other type, an
+// error is returned.
+func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) {
+ v, ok := m[key]
+ if !ok {
+ return nil, nil
+ }
+
+ switch exp := v.(type) {
+ case float64:
+ if exp == 0 {
+ return nil, nil
+ }
+
+ return newNumericDateFromSeconds(exp), nil
+ case json.Number:
+ v, _ := exp.Float64()
+
+ return newNumericDateFromSeconds(v), nil
+ }
+
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+}
+
+// parseClaimsString tries to parse a key in the map claims type as a
+// [ClaimsStrings] type, which can either be a string or an array of string.
+func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
+ var cs []string
+ switch v := m[key].(type) {
+ case string:
+ cs = append(cs, v)
+ case []string:
+ cs = v
+ case []interface{}:
+ for _, a := range v {
+ vs, ok := a.(string)
+ if !ok {
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+ cs = append(cs, vs)
+ }
+ }
+
+ return cs, nil
+}
+
+// parseString tries to parse a key in the map claims type as a [string] type.
+// If the key does not exist, an empty string is returned. If the key has the
+// wrong type, an error is returned.
+func (m MapClaims) parseString(key string) (string, error) {
+ var (
+ ok bool
+ raw interface{}
+ iss string
+ )
+ raw, ok = m[key]
+ if !ok {
+ return "", nil
+ }
+
+ iss, ok = raw.(string)
+ if !ok {
+ return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+
+ return iss, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go
new file mode 100644
index 0000000000..c93daa5849
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/none.go
@@ -0,0 +1,50 @@
+package jwt
+
+// SigningMethodNone implements the none signing method. This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = newError("'none' signature type is not allowed", ErrTokenUnverifiable)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString string, sig []byte, key interface{}) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if string(sig) != "" {
+ return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable)
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) ([]byte, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return []byte{}, nil
+ }
+
+ return nil, NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
new file mode 100644
index 0000000000..f4386fbaac
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go
@@ -0,0 +1,215 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type Parser struct {
+ // If populated, only these methods will be considered valid.
+ validMethods []string
+
+ // Use JSON Number format in JSON decoder.
+ useJSONNumber bool
+
+ // Skip claims validation during token parsing.
+ skipClaimsValidation bool
+
+ validator *validator
+
+ decodeStrict bool
+
+ decodePaddingAllowed bool
+}
+
+// NewParser creates a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+ p := &Parser{
+ validator: &validator{},
+ }
+
+ // Loop through our parsing options and apply them
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the key for validating.
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
+// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
+// than the default MapClaims implementation of Claims.
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.validMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.validMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid)
+ }
+ }
+
+ // Lookup key
+ var key interface{}
+ if keyFunc == nil {
+ // keyFunc was not provided. short circuiting validation
+ return token, newError("no keyfunc was provided", ErrTokenUnverifiable)
+ }
+ if key, err = keyFunc(token); err != nil {
+ return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
+ }
+
+ // Decode signature
+ token.Signature, err = p.DecodeSegment(parts[2])
+ if err != nil {
+ return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
+ }
+
+ // Perform signature validation
+ if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+ return token, newError("", ErrTokenSignatureInvalid, err)
+ }
+
+ // Validate Claims
+ if !p.skipClaimsValidation {
+ // Make sure we have at least a default validator
+ if p.validator == nil {
+ p.validator = newValidator()
+ }
+
+ if err := p.validator.Validate(claims); err != nil {
+ return token, newError("", ErrTokenInvalidClaims, err)
+ }
+ }
+
+ // No errors so far, token is valid.
+ token.Valid = true
+
+ return token, nil
+}
+
+// ParseUnverified parses the token but doesn't validate the signature.
+//
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ parts = strings.Split(tokenString, ".")
+ if len(parts) != 3 {
+ return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
+ if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+ return token, parts, newError("tokenstring should not contain 'bearer '", ErrTokenMalformed)
+ }
+ return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
+ }
+
+ // parse Claims
+ var claimBytes []byte
+ token.Claims = claims
+
+ if claimBytes, err = p.DecodeSegment(parts[1]); err != nil {
+ return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err)
+ }
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ if p.useJSONNumber {
+ dec.UseNumber()
+ }
+ // JSON Decode. Special case for map type to avoid weird pointer behavior
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ // Handle decode error
+ if err != nil {
+ return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err)
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable)
+ }
+ } else {
+ return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
+ }
+
+ return token, parts, nil
+}
+
+// DecodeSegment decodes a JWT specific base64url encoding. This function will
+// take into account whether the [Parser] is configured with additional options,
+// such as [WithStrictDecoding] or [WithPaddingAllowed].
+func (p *Parser) DecodeSegment(seg string) ([]byte, error) {
+ encoding := base64.RawURLEncoding
+
+ if p.decodePaddingAllowed {
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ encoding = base64.URLEncoding
+ }
+
+ if p.decodeStrict {
+ encoding = encoding.Strict()
+ }
+ return encoding.DecodeString(seg)
+}
+
+// Parse parses, validates, verifies the signature and returns the parsed token.
+// keyFunc will receive the parsed token and should return the cryptographic key
+// for verifying the signature. The caller is strongly encouraged to set the
+// WithValidMethods option to validate the 'alg' claim in the token matches the
+// expected algorithm. For more details about the importance of validating the
+// 'alg' claim, see
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+//
+// Note: If you provide a custom claim implementation that embeds one of the
+// standard claims (such as RegisteredClaims), make sure that a) you either
+// embed a non-pointer version of the claims or b) if you are using a pointer,
+// allocate the proper memory for it before passing in the overall claims,
+// otherwise you might run into a panic.
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
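
[Reviewer note] For orientation while reviewing this vendored file, the sketch below shows how Parse, Keyfunc and WithValidMethods typically combine on the verification path. It assumes the upstream import path github.com/golang-jwt/jwt/v5 and an *rsa.PublicKey named pub supplied by the caller; neither is part of this patch.

	import (
		"crypto/rsa"
		"fmt"

		jwt "github.com/golang-jwt/jwt/v5"
	)

	func verify(tokenString string, pub *rsa.PublicKey) (*jwt.Token, error) {
		// The Keyfunc receives the parsed-but-unverified token (header and claims),
		// so it could select a key by `kid`; here a single key is returned.
		token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
			return pub, nil
		}, jwt.WithValidMethods([]string{"RS256"}))
		if err != nil {
			return nil, fmt.Errorf("token rejected: %w", err)
		}
		return token, nil
	}
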
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
new file mode 100644
index 0000000000..1b5af970f6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
@@ -0,0 +1,120 @@
+package jwt
+
+import "time"
+
+// ParserOption is used to implement functional-style options that modify the
+// behavior of the parser. To add new options, just create a function (ideally
+// beginning with With or Without) that returns an anonymous function that takes
+// a *Parser type as input and manipulates its configuration accordingly.
+type ParserOption func(*Parser)
+
+// WithValidMethods is an option to supply algorithm methods that the parser
+// will check. Only those methods will be considered valid. It is heavily
+// encouraged to use this option in order to prevent attacks such as
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
+func WithValidMethods(methods []string) ParserOption {
+ return func(p *Parser) {
+ p.validMethods = methods
+ }
+}
+
+// WithJSONNumber is an option to configure the underlying JSON parser with
+// UseNumber.
+func WithJSONNumber() ParserOption {
+ return func(p *Parser) {
+ p.useJSONNumber = true
+ }
+}
+
+// WithoutClaimsValidation is an option to disable claims validation. This
+// option should only be used if you know exactly what you are doing.
+func WithoutClaimsValidation() ParserOption {
+ return func(p *Parser) {
+ p.skipClaimsValidation = true
+ }
+}
+
+// WithLeeway returns the ParserOption for specifying the leeway window.
+func WithLeeway(leeway time.Duration) ParserOption {
+ return func(p *Parser) {
+ p.validator.leeway = leeway
+ }
+}
+
+// WithTimeFunc returns the ParserOption for specifying the time func. The
+// primary use-case for this is testing. If you are looking for a way to account
+// for clock-skew, WithLeeway should be used instead.
+func WithTimeFunc(f func() time.Time) ParserOption {
+ return func(p *Parser) {
+ p.validator.timeFunc = f
+ }
+}
+
+// WithIssuedAt returns the ParserOption to enable verification
+// of issued-at.
+func WithIssuedAt() ParserOption {
+ return func(p *Parser) {
+ p.validator.verifyIat = true
+ }
+}
+
+// WithAudience configures the validator to require the specified audience in
+// the `aud` claim. Validation will fail if the audience is not listed in the
+// token or the `aud` claim is missing.
+//
+// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure applications, we decided to REQUIRE the existence of the claim,
+// if an audience is expected.
+func WithAudience(aud string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedAud = aud
+ }
+}
+
+// WithIssuer configures the validator to require the specified issuer in the
+// `iss` claim. Validation will fail if a different issuer is specified in the
+// token or the `iss` claim is missing.
+//
+// NOTE: While the `iss` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure applications, we decided to REQUIRE the existence of the claim,
+// if an issuer is expected.
+func WithIssuer(iss string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedIss = iss
+ }
+}
+
+// WithSubject configures the validator to require the specified subject in the
+// `sub` claim. Validation will fail if a different subject is specified in the
+// token or the `sub` claim is missing.
+//
+// NOTE: While the `sub` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers in
+// writing secure applications, we decided to REQUIRE the existence of the claim,
+// if a subject is expected.
+func WithSubject(sub string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedSub = sub
+ }
+}
+
+// WithPaddingAllowed will enable the codec used for decoding JWTs to allow
+// padding. Note that the JWS RFC7515 states that the tokens will utilize a
+// Base64url encoding with no padding. Unfortunately, some implementations of
+// JWT are producing non-standard tokens, and thus require support for decoding.
+func WithPaddingAllowed() ParserOption {
+ return func(p *Parser) {
+ p.decodePaddingAllowed = true
+ }
+}
+
+// WithStrictDecoding will switch the codec used for decoding JWTs into strict
+// mode. In this mode, the decoder requires that trailing padding bits are zero,
+// as described in RFC 4648 section 3.5.
+func WithStrictDecoding() ParserOption {
+ return func(p *Parser) {
+ p.decodeStrict = true
+ }
+}
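
[Reviewer note] A small sketch of how several of these options compose on a reusable parser. This is a fragment (it assumes the jwt and time imports plus tokenString/keyFunc from the sketch above); the issuer, audience and leeway values are placeholders, not values taken from this patch.

	// The options below map directly onto the validator fields set in this file.
	parser := jwt.NewParser(
		jwt.WithValidMethods([]string{"RS256"}),
		jwt.WithIssuer("https://issuer.example"),
		jwt.WithAudience("my-api"),
		jwt.WithLeeway(30*time.Second),
		jwt.WithIssuedAt(),
	)
	token, err := parser.Parse(tokenString, keyFunc)
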
diff --git a/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
new file mode 100644
index 0000000000..77951a531d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
@@ -0,0 +1,63 @@
+package jwt
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use-case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+ // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+ Issuer string `json:"iss,omitempty"`
+
+ // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+ Subject string `json:"sub,omitempty"`
+
+ // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+ Audience ClaimStrings `json:"aud,omitempty"`
+
+ // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+ ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+ // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+ NotBefore *NumericDate `json:"nbf,omitempty"`
+
+ // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+ IssuedAt *NumericDate `json:"iat,omitempty"`
+
+ // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+ ID string `json:"jti,omitempty"`
+}
+
+// GetExpirationTime implements the Claims interface.
+func (c RegisteredClaims) GetExpirationTime() (*NumericDate, error) {
+ return c.ExpiresAt, nil
+}
+
+// GetNotBefore implements the Claims interface.
+func (c RegisteredClaims) GetNotBefore() (*NumericDate, error) {
+ return c.NotBefore, nil
+}
+
+// GetIssuedAt implements the Claims interface.
+func (c RegisteredClaims) GetIssuedAt() (*NumericDate, error) {
+ return c.IssuedAt, nil
+}
+
+// GetAudience implements the Claims interface.
+func (c RegisteredClaims) GetAudience() (ClaimStrings, error) {
+ return c.Audience, nil
+}
+
+// GetIssuer implements the Claims interface.
+func (c RegisteredClaims) GetIssuer() (string, error) {
+ return c.Issuer, nil
+}
+
+// GetSubject implements the Claims interface.
+func (c RegisteredClaims) GetSubject() (string, error) {
+ return c.Subject, nil
+}
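
[Reviewer note] A sketch of the embedding pattern the doc comment above describes: a user-defined claims type wrapping RegisteredClaims, used with ParseWithClaims. The TenantID field and variable names are illustrative only, and the fragment assumes the fmt/jwt imports and tokenString/keyFunc from the earlier sketches.

	type AppClaims struct {
		TenantID string `json:"tid,omitempty"` // private claim, illustrative
		jwt.RegisteredClaims                   // embedded registered claims (iss, sub, exp, ...)
	}

	claims := &AppClaims{}
	token, err := jwt.ParseWithClaims(tokenString, claims, keyFunc,
		jwt.WithValidMethods([]string{"RS256"}))
	if err == nil && token.Valid {
		fmt.Println(claims.TenantID, claims.Subject)
	}
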
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
new file mode 100644
index 0000000000..daff094313
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
@@ -0,0 +1,93 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod
+// For this signing method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error {
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return ErrInvalidKeyType
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements token signing for the SigningMethod
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return nil, ErrInvalidKey
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
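
[Reviewer note] For context, a minimal issuing sketch using SigningMethodRS256 together with NewWithClaims and SignedString (both defined in token.go later in this patch). The throwaway key generated here is for illustration only, not how the e2e suite manages keys.

	import (
		"crypto/rand"
		"crypto/rsa"
		"time"

		jwt "github.com/golang-jwt/jwt/v5"
	)

	func issue() (string, error) {
		key, err := rsa.GenerateKey(rand.Reader, 2048) // demo key, not for production
		if err != nil {
			return "", err
		}
		claims := jwt.RegisteredClaims{
			Issuer:    "https://issuer.example",
			Subject:   "user-123",
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
		}
		// Sign expects an *rsa.PrivateKey for the RS* methods, per the comment above.
		return jwt.NewWithClaims(jwt.SigningMethodRS256, claims).SignedString(key)
	}
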
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
new file mode 100644
index 0000000000..9599f0a46c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
@@ -0,0 +1,135 @@
+//go:build go1.4
+// +build go1.4
+
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSAPSS family of signing methods.
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+ // VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+ // Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+ // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+ // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+ VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an *rsa.PublicKey struct
+func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error {
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return ErrInvalidKey
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ opts := m.Options
+ if m.VerifyOptions != nil {
+ opts = m.VerifyOptions
+ }
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey struct
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return nil, ErrInvalidKeyType
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
new file mode 100644
index 0000000000..b3aeebbe11
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
@@ -0,0 +1,107 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+//
+// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
+// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
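
[Reviewer note] A short sketch of loading key material with these helpers; the file paths are placeholders and the os/rsa/jwt imports are assumed.

	func loadKeys() (*rsa.PrivateKey, *rsa.PublicKey, error) {
		privPEM, err := os.ReadFile("private.pem") // placeholder path
		if err != nil {
			return nil, nil, err
		}
		priv, err := jwt.ParseRSAPrivateKeyFromPEM(privPEM)
		if err != nil {
			return nil, nil, err
		}
		pubPEM, err := os.ReadFile("public.pem") // placeholder path
		if err != nil {
			return nil, nil, err
		}
		pub, err := jwt.ParseRSAPublicKeyFromPEM(pubPEM)
		if err != nil {
			return nil, nil, err
		}
		return priv, pub, nil // usable with SignedString and from a Keyfunc respectively
	}
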
diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
new file mode 100644
index 0000000000..0d73631c1b
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
@@ -0,0 +1,49 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// SigningMethod can be used to add new methods for signing or verifying tokens. It
+// takes a decoded signature as an input in the Verify function and produces a
+// signature in Sign. The signature is then usually base64 encoded as part of a
+// JWT.
+type SigningMethod interface {
+ Verify(signingString string, sig []byte, key interface{}) error // Returns nil if signature is valid
+ Sign(signingString string, key interface{}) ([]byte, error) // Returns signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for the signing method.
+// This is typically done during init() in the method's implementation
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// GetSigningMethod retrieves a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ for alg := range signingMethods {
+ algs = append(algs, alg)
+ }
+ return
+}
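
[Reviewer note] A tiny fragment showing the registry accessors, useful when validating configuration against the algorithms compiled into the binary; it assumes the fmt and jwt imports.

	// GetAlgorithms reports every "alg" registered via init() in this package.
	for _, alg := range jwt.GetAlgorithms() {
		fmt.Println("registered:", alg)
	}

	// GetSigningMethod returns nil for unknown algorithms, so a guard is needed.
	if m := jwt.GetSigningMethod("RS256"); m != nil {
		fmt.Println("alg:", m.Alg())
	}
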
diff --git a/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
new file mode 100644
index 0000000000..53745d51d7
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
new file mode 100644
index 0000000000..c8ad7c7834
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token.go
@@ -0,0 +1,86 @@
+package jwt
+
+import (
+ "encoding/base64"
+ "encoding/json"
+)
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed, but unverified
+// Token. This allows you to use properties in the Header of the token (such as
+// `kid`) to identify which key to use.
+type Keyfunc func(*Token) (interface{}, error)
+
+// Token represents a JWT Token. Different fields will be used depending on
+// whether you're creating or parsing/verifying a token.
+type Token struct {
+ Raw string // Raw contains the raw token. Populated when you [Parse] a token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]interface{} // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
+ Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
+}
+
+// New creates a new [Token] with the specified signing method and an empty map
+// of claims. Additional options can be specified, but are currently unused.
+func New(method SigningMethod, opts ...TokenOption) *Token {
+ return NewWithClaims(method, MapClaims{}, opts...)
+}
+
+// NewWithClaims creates a new [Token] with the specified signing method and
+// claims. Additional options can be specified, but are currently unused.
+func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// SignedString creates and returns a complete, signed JWT. The token is signed
+// using the SigningMethod specified in the token. Please refer to
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types
+// for an overview of the different signing methods and their respective key
+// types.
+func (t *Token) SignedString(key interface{}) (string, error) {
+ sstr, err := t.SigningString()
+ if err != nil {
+ return "", err
+ }
+
+ sig, err := t.Method.Sign(sstr, key)
+ if err != nil {
+ return "", err
+ }
+
+ return sstr + "." + t.EncodeSegment(sig), nil
+}
+
+// SigningString generates the signing string. This is the most expensive part
+// of the whole deal. Unless you need this for something special, just go
+// straight for the SignedString.
+func (t *Token) SigningString() (string, error) {
+ h, err := json.Marshal(t.Header)
+ if err != nil {
+ return "", err
+ }
+
+ c, err := json.Marshal(t.Claims)
+ if err != nil {
+ return "", err
+ }
+
+ return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil
+}
+
+// EncodeSegment encodes a JWT specific base64url encoding with padding
+// stripped. In the future, this function might take into account a
+// [TokenOption]. Therefore, this function exists as a method of [Token], rather
+// than a global function.
+func (*Token) EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
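
[Reviewer note] A small sketch distinguishing SigningString from SignedString, which can help when debugging signature mismatches. It reuses the key and import assumptions from the earlier sketches.

	func debugSign(privKey *rsa.PrivateKey) error {
		tok := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "user-123"})

		// SigningString returns only header.claims, without the signature segment.
		sstr, err := tok.SigningString()
		if err != nil {
			return err
		}
		fmt.Println("signing input:", sstr)

		// SignedString appends the base64url-encoded signature produced by the method.
		signed, err := tok.SignedString(privKey)
		if err != nil {
			return err
		}
		fmt.Println("jwt:", signed)
		return nil
	}
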
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token_option.go b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
new file mode 100644
index 0000000000..b4ae3badf8
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
@@ -0,0 +1,5 @@
+package jwt
+
+// TokenOption is a reserved type, which provides some forward compatibility,
+// if we ever want to introduce token creation-related options.
+type TokenOption func(*Token)
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
new file mode 100644
index 0000000000..b82b38867d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/types.go
@@ -0,0 +1,150 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// TimePrecision sets the precision of times and dates within this library. This
+// has an influence on the precision of times when comparing expiry or other
+// related time fields. Furthermore, it is also the precision of times when
+// serializing.
+//
+// For backwards compatibility the default precision is set to seconds, so that
+// no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type,
+// especially its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the
+// behavior of the underlying []string. If it is set to false, it will serialize
+// to a single string, if it contains one element. Otherwise, it will serialize
+// to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+ time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+ return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+ round, frac := math.Modf(f)
+ return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON is an implementation of the json.Marshaler interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+ var prec int
+ if TimePrecision < time.Second {
+ prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+ }
+ truncatedDate := date.Truncate(TimePrecision)
+
+ // For very large timestamps, UnixNano would overflow an int64, but this
+ // function requires nanosecond level precision, so we have to use the
+ // following technique to get round the issue:
+ //
+ // 1. Take the normal unix timestamp to form the whole number part of the
+ // output,
+ // 2. Take the result of the Nanosecond function, which returns the offset
+ // within the second of the particular unix time instance, to form the
+ // decimal part of the output
+ // 3. Concatenate them to produce the final result
+ seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+ nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+ output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+ return output, nil
+}
+
+// UnmarshalJSON is an implementation of the json.Unmarshaler interface and
+// deserializes a [NumericDate] from a JSON representation, i.e. a
+// [json.Number]. This number represents an UNIX epoch with either integer or
+// non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+ var (
+ number json.Number
+ f float64
+ )
+
+ if err = json.Unmarshal(b, &number); err != nil {
+ return fmt.Errorf("could not parse NumericData: %w", err)
+ }
+
+ if f, err = number.Float64(); err != nil {
+ return fmt.Errorf("could not convert json number value to float: %w", err)
+ }
+
+ n := newNumericDateFromSeconds(f)
+ *date = *n
+
+ return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be either
+// serialized from a string array or just a string. This type is necessary,
+// since the "aud" claim can either be a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+ var value interface{}
+
+ if err = json.Unmarshal(data, &value); err != nil {
+ return err
+ }
+
+ var aud []string
+
+ switch v := value.(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = ClaimStrings(v)
+ case []interface{}:
+ for _, vv := range v {
+ vs, ok := vv.(string)
+ if !ok {
+ return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)}
+ }
+ aud = append(aud, vs)
+ }
+ case nil:
+ return nil
+ default:
+ return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)}
+ }
+
+ *s = aud
+
+ return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+ // This handles a special case in the JWT RFC. If the string array, e.g.
+ // used by the "aud" field, only contains one element, it MAY be serialized
+ // as a single string. This may or may not be desired based on the ecosystem
+ // of other JWT library used, so we make it configurable by the variable
+ // MarshalSingleStringAsArray.
+ if len(s) == 1 && !MarshalSingleStringAsArray {
+ return json.Marshal(s[0])
+ }
+
+ return json.Marshal([]string(s))
+}
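
[Reviewer note] A brief fragment showing the two package-level knobs defined in this file; both are globals, so they are usually set once at start-up. The values are illustrative and the time/encoding-json/jwt imports are assumed.

	// Emit millisecond-precision exp/iat/nbf values instead of whole seconds.
	jwt.TimePrecision = time.Millisecond

	// Serialize a single-element "aud" as a plain string rather than an array.
	jwt.MarshalSingleStringAsArray = false

	aud := jwt.ClaimStrings{"my-api"}
	b, _ := json.Marshal(aud) // "my-api" with the setting above, ["my-api"] otherwise
	_ = b
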
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
new file mode 100644
index 0000000000..3850438939
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go
@@ -0,0 +1,301 @@
+package jwt
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "time"
+)
+
+// ClaimsValidator is an interface that can be implemented by custom claims that
+// wish to execute any additional claims validation based on
+// application-specific logic. The Validate function is then executed in
+// addition to the regular claims validation and any error returned is appended
+// to the final validation result.
+//
+// type MyCustomClaims struct {
+// Foo string `json:"foo"`
+// jwt.RegisteredClaims
+// }
+//
+// func (m MyCustomClaims) Validate() error {
+// if m.Foo != "bar" {
+// return errors.New("must be foobar")
+// }
+// return nil
+// }
+type ClaimsValidator interface {
+ Claims
+ Validate() error
+}
+
+// validator is the core of the new Validation API. It is automatically used by
+// a [Parser] during parsing and can be modified with various parser options.
+//
+// Note: This struct is intentionally not exported (yet) as we want to
+// internally finalize its API. In the future, we might make it publicly
+// available.
+type validator struct {
+ // leeway is an optional leeway that can be provided to account for clock skew.
+ leeway time.Duration
+
+ // timeFunc is used to supply the current time that is needed for
+ // validation. If unspecified, this defaults to time.Now.
+ timeFunc func() time.Time
+
+ // verifyIat specifies whether the iat (Issued At) claim will be verified.
+ // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
+ // only specifies the age of the token, but no validation check is
+ // necessary. However, if wanted, it can be checked if the iat is
+ // unrealistic, i.e., in the future.
+ verifyIat bool
+
+ // expectedAud contains the audience this token expects. Supplying an empty
+ // string will disable aud checking.
+ expectedAud string
+
+ // expectedIss contains the issuer this token expects. Supplying an empty
+ // string will disable iss checking.
+ expectedIss string
+
+ // expectedSub contains the subject this token expects. Supplying an empty
+ // string will disable sub checking.
+ expectedSub string
+}
+
+// newValidator can be used to create a stand-alone validator with the supplied
+// options. This validator can then be used to validate already parsed claims.
+func newValidator(opts ...ParserOption) *validator {
+ p := NewParser(opts...)
+ return p.validator
+}
+
+// Validate validates the given claims. It will also perform any custom
+// validation if claims implements the [ClaimsValidator] interface.
+func (v *validator) Validate(claims Claims) error {
+ var (
+ now time.Time
+ errs []error = make([]error, 0, 6)
+ err error
+ )
+
+ // Check, if we have a time func
+ if v.timeFunc != nil {
+ now = v.timeFunc()
+ } else {
+ now = time.Now()
+ }
+
+ // We always need to check the expiration time, but usage of the claim
+ // itself is OPTIONAL.
+ if err = v.verifyExpiresAt(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+
+ // We always need to check not-before, but usage of the claim itself is
+ // OPTIONAL.
+ if err = v.verifyNotBefore(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+
+ // Check issued-at if the option is enabled
+ if v.verifyIat {
+ if err = v.verifyIssuedAt(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected audience, we also require the audience claim
+ if v.expectedAud != "" {
+ if err = v.verifyAudience(claims, v.expectedAud, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected issuer, we also require the issuer claim
+ if v.expectedIss != "" {
+ if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected subject, we also require the subject claim
+ if v.expectedSub != "" {
+ if err = v.verifySubject(claims, v.expectedSub, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // Finally, we want to give the claim itself some possibility to do some
+ // additional custom validation based on a custom Validate function.
+ cvt, ok := claims.(ClaimsValidator)
+ if ok {
+ if err := cvt.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+
+ return joinErrors(errs...)
+}
+
+// verifyExpiresAt compares the exp claim in claims against cmp. This function
+// will succeed if cmp < exp. Additional leeway is taken into account.
+//
+// If exp is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
+ exp, err := claims.GetExpirationTime()
+ if err != nil {
+ return err
+ }
+
+ if exp == nil {
+ return errorIfRequired(required, "exp")
+ }
+
+ return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired)
+}
+
+// verifyIssuedAt compares the iat claim in claims against cmp. This function
+// will succeed if cmp >= iat. Additional leeway is taken into account.
+//
+// If iat is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
+ iat, err := claims.GetIssuedAt()
+ if err != nil {
+ return err
+ }
+
+ if iat == nil {
+ return errorIfRequired(required, "iat")
+ }
+
+ return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued)
+}
+
+// verifyNotBefore compares the nbf claim in claims against cmp. This function
+// will return true if cmp >= nbf. Additional leeway is taken into account.
+//
+// If nbf is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
+ nbf, err := claims.GetNotBefore()
+ if err != nil {
+ return err
+ }
+
+ if nbf == nil {
+ return errorIfRequired(required, "nbf")
+ }
+
+ return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet)
+}
+
+// verifyAudience compares the aud claim against cmp.
+//
+// If aud is not set or an empty list, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *validator) verifyAudience(claims Claims, cmp string, required bool) error {
+ aud, err := claims.GetAudience()
+ if err != nil {
+ return err
+ }
+
+ if len(aud) == 0 {
+ return errorIfRequired(required, "aud")
+ }
+
+ // use a var here to keep constant time compare when looping over a number of claims
+ result := false
+
+ var stringClaims string
+ for _, a := range aud {
+ if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+ result = true
+ }
+ stringClaims = stringClaims + a
+ }
+
+ // case where "" is sent in one or many aud claims
+ if stringClaims == "" {
+ return errorIfRequired(required, "aud")
+ }
+
+ return errorIfFalse(result, ErrTokenInvalidAudience)
+}
+
+// verifyIssuer compares the iss claim in claims against cmp.
+//
+// If iss is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *validator) verifyIssuer(claims Claims, cmp string, required bool) error {
+ iss, err := claims.GetIssuer()
+ if err != nil {
+ return err
+ }
+
+ if iss == "" {
+ return errorIfRequired(required, "iss")
+ }
+
+ return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer)
+}
+
+// verifySubject compares the sub claim against cmp.
+//
+// If sub is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it's
+// the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *validator) verifySubject(claims Claims, cmp string, required bool) error {
+ sub, err := claims.GetSubject()
+ if err != nil {
+ return err
+ }
+
+ if sub == "" {
+ return errorIfRequired(required, "sub")
+ }
+
+ return errorIfFalse(sub == cmp, ErrTokenInvalidSubject)
+}
+
+// errorIfFalse returns the error specified in err, if the value is false.
+// Otherwise, nil is returned.
+func errorIfFalse(value bool, err error) error {
+ if value {
+ return nil
+ } else {
+ return err
+ }
+}
+
+// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is
+// true. Otherwise, nil is returned.
+func errorIfRequired(required bool, claim string) error {
+ if required {
+ return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing)
+ } else {
+ return nil
+ }
+}
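
[Reviewer note] A hedged sketch of the ClaimsValidator hook together with error inspection; the MyCustomClaims type mirrors the example in the doc comment above, and the errors/jwt imports plus keyFunc are assumed.

	type MyCustomClaims struct {
		Foo string `json:"foo"`
		jwt.RegisteredClaims
	}

	// Validate is run by the validator in addition to the registered claim checks.
	func (m MyCustomClaims) Validate() error {
		if m.Foo != "bar" {
			return errors.New("must be foobar")
		}
		return nil
	}

	func check(tokenString string, keyFunc jwt.Keyfunc) error {
		_, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyFunc)
		if errors.Is(err, jwt.ErrTokenInvalidClaims) {
			// registered or custom claim validation failed
		}
		return err
	}
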
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel
index e973abfc54..4331321139 100644
--- a/vendor/github.com/google/cel-go/cel/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel
@@ -23,6 +23,7 @@ go_library(
"//checker/decls:go_default_library",
"//common:go_default_library",
"//common/containers:go_default_library",
+ "//common/operators:go_default_library",
"//common/overloads:go_default_library",
"//common/types:go_default_library",
"//common/types/pb:go_default_library",
@@ -31,7 +32,7 @@ go_library(
"//interpreter:go_default_library",
"//interpreter/functions:go_default_library",
"//parser:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protodesc:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
@@ -69,7 +70,7 @@ go_test(
"//test/proto2pb:go_default_library",
"//test/proto3pb:go_default_library",
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
],
diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go
index f2df721d07..c0624d1e59 100644
--- a/vendor/github.com/google/cel-go/cel/decls.go
+++ b/vendor/github.com/google/cel-go/cel/decls.go
@@ -139,7 +139,7 @@ var (
kind: TypeKind,
runtimeType: types.TypeType,
}
- //UintType represents a uint type.
+ // UintType represents a uint type.
UintType = &Type{
kind: UintKind,
runtimeType: types.UintType,
@@ -222,7 +222,8 @@ func (t *Type) equals(other *Type) bool {
// - The from types are the same instance
// - The target type is dynamic
// - The fromType has the same kind and type name as the target type, and all parameters of the target type
-// are IsAssignableType() from the parameters of the fromType.
+//
+// are IsAssignableType() from the parameters of the fromType.
func (t *Type) defaultIsAssignableType(fromType *Type) bool {
if t == fromType || t.isDyn() {
return true
@@ -312,6 +313,11 @@ func NullableType(wrapped *Type) *Type {
}
}
+// OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+func OptionalType(param *Type) *Type {
+ return OpaqueType("optional", param)
+}
+
// OpaqueType creates an abstract parameterized type with a given name.
func OpaqueType(name string, params ...*Type) *Type {
return &Type{
@@ -365,7 +371,9 @@ func Variable(name string, t *Type) EnvOption {
//
// - Overloads are searched in the order they are declared
// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents
-// at runtime. Empty lists and maps will result in a 'default dispatch'
+//
+// at runtime. Empty lists and maps will result in a 'default dispatch'
+//
// - In the event that a default dispatch occurs, the first overload provided is the one invoked
//
// If you intend to use overloads which differentiate based on the key or element type of a list or
@@ -405,7 +413,7 @@ func Function(name string, opts ...FunctionOpt) EnvOption {
// FunctionOpt defines a functional option for configuring a function declaration.
type FunctionOpt func(*functionDecl) (*functionDecl, error)
-// SingletonUnaryBinding creates a singleton function defintion to be used for all function overloads.
+// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads.
//
// Note, this approach works well if operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
@@ -431,7 +439,17 @@ func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt {
//
// Note, this approach works well if operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonBinaryBinding
func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
+ return SingletonBinaryBinding(fn, traits...)
+}
+
+// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt {
trait := 0
for _, t := range traits {
trait = trait | t
@@ -453,7 +471,17 @@ func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt {
//
// Note, this approach works well if operand is expected to have a specific trait which it implements,
// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+//
+// Deprecated: use SingletonFunctionBinding
func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt {
+ return SingletonFunctionBinding(fn, traits...)
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
trait := 0
for _, t := range traits {
trait = trait | t
@@ -720,9 +748,8 @@ func (f *functionDecl) addOverload(overload *overloadDecl) error {
// Allow redefinition of an overload implementation so long as the signatures match.
f.overloads[index] = overload
return nil
- } else {
- return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id)
}
+ return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id)
}
}
f.overloads = append(f.overloads, overload)
@@ -1177,3 +1204,43 @@ func collectParamNames(paramNames map[string]struct{}, arg *Type) {
collectParamNames(paramNames, param)
}
}
+
+func typeValueToKind(tv *types.TypeValue) (Kind, error) {
+ switch tv {
+ case types.BoolType:
+ return BoolKind, nil
+ case types.DoubleType:
+ return DoubleKind, nil
+ case types.IntType:
+ return IntKind, nil
+ case types.UintType:
+ return UintKind, nil
+ case types.ListType:
+ return ListKind, nil
+ case types.MapType:
+ return MapKind, nil
+ case types.StringType:
+ return StringKind, nil
+ case types.BytesType:
+ return BytesKind, nil
+ case types.DurationType:
+ return DurationKind, nil
+ case types.TimestampType:
+ return TimestampKind, nil
+ case types.NullType:
+ return NullTypeKind, nil
+ case types.TypeType:
+ return TypeKind, nil
+ default:
+ switch tv.TypeName() {
+ case "dyn":
+ return DynKind, nil
+ case "google.protobuf.Any":
+ return AnyKind, nil
+ case "optional":
+ return OpaqueKind, nil
+ default:
+ return 0, fmt.Errorf("no known conversion for type of %s", tv.TypeName())
+ }
+ }
+}
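
[Reviewer note] Shifting to the cel-go changes: a hedged fragment showing the SingletonBinaryBinding option introduced above, modeled on cel-go's custom-function examples. The function name, overload id and message are made up for illustration, and the cel, ref (common/types/ref), types (common/types) and fmt imports are assumed.

	env, err := cel.NewEnv(
		cel.Function("shake_hands",
			cel.Overload("shake_hands_string_string",
				[]*cel.Type{cel.StringType, cel.StringType}, cel.StringType),
			// One binding shared by every overload of the function.
			cel.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val {
				return types.String(fmt.Sprintf("%v and %v are shaking hands", lhs, rhs))
			}),
		),
	)
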
diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go
index 4e9ecdd648..d9c2ef63f2 100644
--- a/vendor/github.com/google/cel-go/cel/env.go
+++ b/vendor/github.com/google/cel-go/cel/env.go
@@ -102,15 +102,18 @@ type Env struct {
provider ref.TypeProvider
features map[int]bool
appliedFeatures map[int]bool
+ libraries map[string]bool
// Internal parser representation
- prsr *parser.Parser
+ prsr *parser.Parser
+ prsrOpts []parser.Option
// Internal checker representation
- chk *checker.Env
- chkErr error
- chkOnce sync.Once
- chkOpts []checker.Option
+ chkMutex sync.Mutex
+ chk *checker.Env
+ chkErr error
+ chkOnce sync.Once
+ chkOpts []checker.Option
// Program options tied to the environment
progOpts []ProgramOption
@@ -159,6 +162,7 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) {
provider: registry,
features: map[int]bool{},
appliedFeatures: map[int]bool{},
+ libraries: map[string]bool{},
progOpts: []ProgramOption{},
}).configure(opts)
}
@@ -175,14 +179,14 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
pe, _ := AstToParsedExpr(ast)
// Construct the internal checker env, erroring if there is an issue adding the declarations.
- err := e.initChecker()
+ chk, err := e.initChecker()
if err != nil {
errs := common.NewErrors(ast.Source())
- errs.ReportError(common.NoLocation, e.chkErr.Error())
+ errs.ReportError(common.NoLocation, err.Error())
return nil, NewIssues(errs)
}
- res, errs := checker.Check(pe, ast.Source(), e.chk)
+ res, errs := checker.Check(pe, ast.Source(), chk)
if len(errs.GetErrors()) > 0 {
return nil, NewIssues(errs)
}
@@ -236,10 +240,14 @@ func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
// TypeProvider are immutable, or that their underlying implementations are based on the
// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
- if e.chkErr != nil {
- return nil, e.chkErr
+ chk, chkErr := e.getCheckerOrError()
+ if chkErr != nil {
+ return nil, chkErr
}
+ prsrOptsCopy := make([]parser.Option, len(e.prsrOpts))
+ copy(prsrOptsCopy, e.prsrOpts)
+
// The type-checker is configured with Declarations. The declarations may either be provided
// as options which have not yet been validated, or may come from a previous checker instance
// whose types have already been validated.
@@ -248,10 +256,10 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
// Copy the declarations if needed.
decsCopy := []*exprpb.Decl{}
- if e.chk != nil {
+ if chk != nil {
// If the type-checker has already been instantiated, then the e.declarations have been
- // valdiated within the chk instance.
- chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(e.chk))
+ // validated within the chk instance.
+ chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
} else {
// If the type-checker has not been instantiated, ensure the unvalidated declarations are
// provided to the extended Env instance.
@@ -304,8 +312,11 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
for k, v := range e.functions {
funcsCopy[k] = v
}
+ libsCopy := make(map[string]bool, len(e.libraries))
+ for k, v := range e.libraries {
+ libsCopy[k] = v
+ }
- // TODO: functions copy needs to happen here.
ext := &Env{
Container: e.Container,
declarations: decsCopy,
@@ -315,8 +326,10 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
adapter: adapter,
features: featuresCopy,
appliedFeatures: appliedFeaturesCopy,
+ libraries: libsCopy,
provider: provider,
chkOpts: chkOptsCopy,
+ prsrOpts: prsrOptsCopy,
}
return ext.configure(opts)
}
@@ -328,6 +341,12 @@ func (e *Env) HasFeature(flag int) bool {
return has && enabled
}
+// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
+func (e *Env) HasLibrary(libName string) bool {
+ configured, exists := e.libraries[libName]
+ return exists && configured
+}
+
// Parse parses the input expression value `txt` to a Ast and/or a set of Issues.
//
// This form of Parse creates a Source value for the input `txt` and forwards to the
@@ -422,8 +441,8 @@ func (e *Env) UnknownVars() interpreter.PartialActivation {
// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
// Ast format and then Program again.
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
- pruned := interpreter.PruneAst(a.Expr(), details.State())
- expr, err := AstToString(ParsedExprToAst(&exprpb.ParsedExpr{Expr: pruned}))
+ pruned := interpreter.PruneAst(a.Expr(), a.SourceInfo().GetMacroCalls(), details.State())
+ expr, err := AstToString(ParsedExprToAst(pruned))
if err != nil {
return nil, err
}
@@ -443,12 +462,12 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
// extension functions provided by estimator.
-func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator) (checker.CostEstimate, error) {
+func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
checked, err := AstToCheckedExpr(ast)
if err != nil {
return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err)
}
- return checker.Cost(checked, estimator), nil
+ return checker.Cost(checked, estimator, opts...)
}
// configure applies a series of EnvOptions to the current environment.
@@ -464,17 +483,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
}
// If the default UTC timezone fix has been enabled, make sure the library is configured
- if e.HasFeature(featureDefaultUTCTimeZone) {
- if _, found := e.appliedFeatures[featureDefaultUTCTimeZone]; !found {
- e, err = Lib(timeUTCLibrary{})(e)
- if err != nil {
- return nil, err
- }
- // record that the feature has been applied since it will generate declarations
- // and functions which will be propagated on Extend() calls and which should only
- // be registered once.
- e.appliedFeatures[featureDefaultUTCTimeZone] = true
- }
+ e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
+ if err != nil {
+ return nil, err
}
// Initialize all of the functions configured within the environment.
@@ -486,7 +497,10 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
}
// Configure the parser.
- prsrOpts := []parser.Option{parser.Macros(e.macros...)}
+ prsrOpts := []parser.Option{}
+ prsrOpts = append(prsrOpts, e.prsrOpts...)
+ prsrOpts = append(prsrOpts, parser.Macros(e.macros...))
+
if e.HasFeature(featureEnableMacroCallTracking) {
prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
}
@@ -497,7 +511,7 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
// Ensure that the checker init happens eagerly rather than lazily.
if e.HasFeature(featureEagerlyValidateDeclarations) {
- err := e.initChecker()
+ _, err := e.initChecker()
if err != nil {
return nil, err
}
@@ -506,7 +520,7 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
return e, nil
}
-func (e *Env) initChecker() error {
+func (e *Env) initChecker() (*checker.Env, error) {
e.chkOnce.Do(func() {
chkOpts := []checker.Option{}
chkOpts = append(chkOpts, e.chkOpts...)
@@ -518,32 +532,68 @@ func (e *Env) initChecker() error {
ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...)
if err != nil {
- e.chkErr = err
+ e.setCheckerOrError(nil, err)
return
}
// Add the statically configured declarations.
err = ce.Add(e.declarations...)
if err != nil {
- e.chkErr = err
+ e.setCheckerOrError(nil, err)
return
}
// Add the function declarations which are derived from the FunctionDecl instances.
for _, fn := range e.functions {
fnDecl, err := functionDeclToExprDecl(fn)
if err != nil {
- e.chkErr = err
+ e.setCheckerOrError(nil, err)
return
}
err = ce.Add(fnDecl)
if err != nil {
- e.chkErr = err
+ e.setCheckerOrError(nil, err)
return
}
}
// Add function declarations here separately.
- e.chk = ce
+ e.setCheckerOrError(ce, nil)
})
- return e.chkErr
+ return e.getCheckerOrError()
+}
+
+// setCheckerOrError sets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) setCheckerOrError(chk *checker.Env, chkErr error) {
+ e.chkMutex.Lock()
+ e.chk = chk
+ e.chkErr = chkErr
+ e.chkMutex.Unlock()
+}
+
+// getCheckerOrError gets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) getCheckerOrError() (*checker.Env, error) {
+ e.chkMutex.Lock()
+ defer e.chkMutex.Unlock()
+ return e.chk, e.chkErr
+}
+
+// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
+// the feature if it has not already been enabled.
+func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
+ if !e.HasFeature(feature) {
+ return e, nil
+ }
+ _, applied := e.appliedFeatures[feature]
+ if applied {
+ return e, nil
+ }
+ e, err := option(e)
+ if err != nil {
+ return nil, err
+ }
+ // record that the feature has been applied since it will generate declarations
+ // and functions which will be propagated on Extend() calls and which should only
+ // be registered once.
+ e.appliedFeatures[feature] = true
+ return e, nil
}
// Issues defines methods for inspecting the error details of parse and check calls.
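Note on the checker synchronization change above: a minimal sketch, assuming only the public cel API, of the concurrent-compile scenario the new mutex-guarded checker state protects. The expression and goroutine count are illustrative.

package main

import (
	"sync"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Compile triggers the lazy checker initialization; the mutex-guarded
			// setCheckerOrError/getCheckerOrError pair makes racing initializations safe.
			_, _ = env.Compile(`1 + 1 == 2`)
		}()
	}
	wg.Wait()
}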
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go
index e721c97f66..93ded3cf1b 100644
--- a/vendor/github.com/google/cel-go/cel/io.go
+++ b/vendor/github.com/google/cel-go/cel/io.go
@@ -19,14 +19,14 @@ import (
"fmt"
"reflect"
+ "google.golang.org/protobuf/proto"
+
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/parser"
- "google.golang.org/protobuf/proto"
-
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
anypb "google.golang.org/protobuf/types/known/anypb"
)
diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go
index 5ca528459a..bcfd44f78a 100644
--- a/vendor/github.com/google/cel-go/cel/library.go
+++ b/vendor/github.com/google/cel-go/cel/library.go
@@ -20,10 +20,27 @@ import (
"time"
"github.com/google/cel-go/checker"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
+ "github.com/google/cel-go/parser"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+const (
+ optMapMacro = "optMap"
+ hasValueFunc = "hasValue"
+ optionalNoneFunc = "optional.none"
+ optionalOfFunc = "optional.of"
+ optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
+ valueFunc = "value"
+ unusedIterVar = "#unused"
)
// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL
@@ -42,10 +59,27 @@ type Library interface {
ProgramOptions() []ProgramOption
}
+// SingletonLibrary refines the Library interface to ensure that libraries in this format are only
+// configured once within the environment.
+type SingletonLibrary interface {
+ Library
+
+ // LibraryName provides a namespaced name which is used to check whether the library has already
+ // been configured in the environment.
+ LibraryName() string
+}
+
// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args,
// and to be linked to each other.
func Lib(l Library) EnvOption {
+ singleton, isSingleton := l.(SingletonLibrary)
return func(e *Env) (*Env, error) {
+ if isSingleton {
+ if e.HasLibrary(singleton.LibraryName()) {
+ return e, nil
+ }
+ e.libraries[singleton.LibraryName()] = true
+ }
var err error
for _, opt := range l.CompileOptions() {
e, err = opt(e)
@@ -67,6 +101,11 @@ func StdLib() EnvOption {
// features documented in the specification.
type stdLibrary struct{}
+// LibraryName implements the SingletonLibrary interface method.
+func (stdLibrary) LibraryName() string {
+ return "cel.lib.std"
+}
+
// EnvOptions returns options for the standard CEL function declarations and macros.
func (stdLibrary) CompileOptions() []EnvOption {
return []EnvOption{
@@ -82,6 +121,225 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
}
}
+type optionalLibrary struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (optionalLibrary) LibraryName() string {
+ return "cel.lib.optional"
+}
+
+// CompileOptions implements the Library interface method.
+func (optionalLibrary) CompileOptions() []EnvOption {
+ paramTypeK := TypeParamType("K")
+ paramTypeV := TypeParamType("V")
+ optionalTypeV := OptionalType(paramTypeV)
+ listTypeV := ListType(paramTypeV)
+ mapTypeKV := MapType(paramTypeK, paramTypeV)
+
+ return []EnvOption{
+ // Enable the optional syntax in the parser.
+ enableOptionalSyntax(),
+
+ // Introduce the optional type.
+ Types(types.OptionalType),
+
+ // Configure the optMap macro.
+ Macros(NewReceiverMacro(optMapMacro, 2, optMap)),
+
+ // Global and member functions for working with optional values.
+ Function(optionalOfFunc,
+ Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ return types.OptionalOf(value)
+ }))),
+ Function(optionalOfNonZeroValueFunc,
+ Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ v, isZeroer := value.(traits.Zeroer)
+ if !isZeroer || !v.IsZeroValue() {
+ return types.OptionalOf(value)
+ }
+ return types.OptionalNone
+ }))),
+ Function(optionalNoneFunc,
+ Overload("optional_none", []*Type{}, optionalTypeV,
+ FunctionBinding(func(values ...ref.Val) ref.Val {
+ return types.OptionalNone
+ }))),
+ Function(valueFunc,
+ MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return opt.GetValue()
+ }))),
+ Function(hasValueFunc,
+ MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
+ UnaryBinding(func(value ref.Val) ref.Val {
+ opt := value.(*types.Optional)
+ return types.Bool(opt.HasValue())
+ }))),
+
+ // Implementation of 'or' and 'orValue' are special-cased to support short-circuiting in the
+ // evaluation chain.
+ Function("or",
+ MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
+ Function("orValue",
+ MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
+
+ // OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
+ // output type.
+ Function(operators.OptSelect,
+ Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
+
+ // OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
+ // these signatures to determine type-agreement without any special handling.
+ Function(operators.OptIndex,
+ Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
+ Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
+ Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+
+ // Index overloads to accommodate using an optional value as the operand.
+ Function(operators.Index,
+ Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+ Overload("optional_map_index_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+ }
+}
+
+func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ varIdent := args[0]
+ varName := ""
+ switch varIdent.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ varName = varIdent.GetIdentExpr().GetName()
+ default:
+ return nil, &common.Error{
+ Message: "optMap() variable name must be a simple identifier",
+ Location: meh.OffsetLocation(varIdent.GetId()),
+ }
+ }
+ mapExpr := args[1]
+ return meh.GlobalCall(
+ operators.Conditional,
+ meh.ReceiverCall(hasValueFunc, target),
+ meh.GlobalCall(optionalOfFunc,
+ meh.Fold(
+ unusedIterVar,
+ meh.NewList(),
+ varName,
+ meh.ReceiverCall(valueFunc, target),
+ meh.LiteralBool(false),
+ meh.Ident(varName),
+ mapExpr,
+ ),
+ ),
+ meh.GlobalCall(optionalNoneFunc),
+ ), nil
+}
+
+// ProgramOptions implements the Library interface method.
+func (optionalLibrary) ProgramOptions() []ProgramOption {
+ return []ProgramOption{
+ CustomDecorator(decorateOptionalOr),
+ }
+}
+
+func enableOptionalSyntax() EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
+ return e, nil
+ }
+}
+
+func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) {
+ call, ok := i.(interpreter.InterpretableCall)
+ if !ok {
+ return i, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return i, nil
+ }
+ switch call.Function() {
+ case "or":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_or_optional" {
+ return i, nil
+ }
+ return &evalOptionalOr{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ case "orValue":
+ if call.OverloadID() != "" && call.OverloadID() != "optional_orValue_value" {
+ return i, nil
+ }
+ return &evalOptionalOrValue{
+ id: call.ID(),
+ lhs: args[0],
+ rhs: args[1],
+ }, nil
+ default:
+ return i, nil
+ }
+}
+
+// evalOptionalOr selects between two optional values: the first if it has a value, otherwise
+// the second optional expression is evaluated and returned.
+type evalOptionalOr struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOr) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal
+ }
+ return opt.rhs.Eval(ctx)
+}
+
+// evalOptionalOrValue selects between an optional or a concrete value. If the optional has a value,
+// its value is returned, otherwise the alternative value expression is evaluated and returned.
+type evalOptionalOrValue struct {
+ id int64
+ lhs interpreter.Interpretable
+ rhs interpreter.Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (opt *evalOptionalOrValue) ID() int64 {
+ return opt.id
+}
+
+// Eval evaluates the left-hand side optional to determine whether it contains a value, else
+// proceeds with the right-hand side evaluation.
+func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val {
+ // short-circuit lhs.
+ optLHS := opt.lhs.Eval(ctx)
+ optVal, ok := optLHS.(*types.Optional)
+ if !ok {
+ return optLHS
+ }
+ if optVal.HasValue() {
+ return optVal.GetValue()
+ }
+ return opt.rhs.Eval(ctx)
+}
+
type timeUTCLibrary struct{}
func (timeUTCLibrary) CompileOptions() []EnvOption {
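The SingletonLibrary refinement above makes repeated Lib() calls idempotent. A minimal sketch of a user-defined library adopting the pattern; the myLib type and its library name are hypothetical, not part of cel-go.

package main

import "github.com/google/cel-go/cel"

// myLib is a hypothetical library used only to illustrate the pattern; it is not part of cel-go.
type myLib struct{}

// LibraryName marks the library as a singleton so repeated cel.Lib(myLib{}) calls are no-ops.
func (myLib) LibraryName() string { return "example.lib.my" }

func (myLib) CompileOptions() []cel.EnvOption     { return []cel.EnvOption{} }
func (myLib) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} }

func main() {
	// Passing the same singleton library twice configures it only once.
	env, err := cel.NewEnv(cel.Lib(myLib{}), cel.Lib(myLib{}))
	if err != nil {
		panic(err)
	}
	_ = env
}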
diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go
index e43cb4eeea..e48c5bf8ee 100644
--- a/vendor/github.com/google/cel-go/cel/macro.go
+++ b/vendor/github.com/google/cel-go/cel/macro.go
@@ -17,6 +17,7 @@ package cel
import (
"github.com/google/cel-go/common"
"github.com/google/cel-go/parser"
+
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
@@ -26,8 +27,11 @@ import (
// a Macro should be created per arg-count or as a var arg macro.
type Macro = parser.Macro
-// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree, or an error
-// if the input arguments are not suitable for the expansion requirements for the macro in question.
+// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
//
// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
// and produces as output an Expr ast node.
@@ -81,8 +85,10 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex
// input to produce an output list.
//
// There are two call patterns supported by map:
-// <iterRange>.map(<iterVar>, <transform>)
-// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+//	<iterRange>.map(<iterVar>, <transform>)
+//	<iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
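To illustrate the nil-return contract documented for MacroExpander above, a hedged sketch of a custom expander; the macro name, expander function, and error message are invented for illustration.

package main

import (
	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// identOnly is a hypothetical MacroExpander: it returns a nil Expr to signal a
// non-match when the argument count is unexpected, and an error when the sole
// argument is not a simple identifier.
func identOnly(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
	if len(args) != 1 {
		return nil, nil // non-match: fall back to treating this as an ordinary call
	}
	ident, ok := args[0].GetExprKind().(*exprpb.Expr_IdentExpr)
	if !ok {
		return nil, &common.Error{
			Message:  "argument must be a simple identifier",
			Location: meh.OffsetLocation(args[0].GetId()),
		}
	}
	// Expand to a bare reference to the identifier.
	return meh.Ident(ident.IdentExpr.GetName()), nil
}

func main() {
	env, err := cel.NewEnv(cel.Macros(cel.NewReceiverMacro("pick", 1, identOnly)))
	if err != nil {
		panic(err)
	}
	_ = env
}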
diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go
index 21c7570106..07f3d6c716 100644
--- a/vendor/github.com/google/cel-go/cel/options.go
+++ b/vendor/github.com/google/cel-go/cel/options.go
@@ -29,6 +29,7 @@ import (
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
+ "github.com/google/cel-go/parser"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
descpb "google.golang.org/protobuf/types/descriptorpb"
@@ -61,6 +62,10 @@ const (
// on a CEL timestamp operation. This fixes the scenario where the input time
// is not already in UTC.
featureDefaultUTCTimeZone
+
+ // Enable the use of optional types in the syntax, type-system, type-checking,
+ // and runtime.
+ featureOptionalTypes
)
// EnvOption is a functional interface for configuring the environment.
@@ -163,19 +168,19 @@ func Container(name string) EnvOption {
// Abbreviations can be useful when working with variables, functions, and especially types from
// multiple namespaces:
//
-// // CEL object construction
-// qual.pkg.version.ObjTypeName{
-// field: alt.container.ver.FieldTypeName{value: ...}
-// }
+// // CEL object construction
+// qual.pkg.version.ObjTypeName{
+// field: alt.container.ver.FieldTypeName{value: ...}
+// }
//
// Only one the qualified names above may be used as the CEL container, so at least one of these
// references must be a long qualified name within an otherwise short CEL program. Using the
// following abbreviations, the program becomes much simpler:
//
-// // CEL Go option
-// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
-// // Simplified Object construction
-// ObjTypeName{field: FieldTypeName{value: ...}}
+// // CEL Go option
+// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+// // Simplified Object construction
+// ObjTypeName{field: FieldTypeName{value: ...}}
//
// There are a few rules for the qualified names and the simple abbreviations generated from them:
// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
@@ -188,9 +193,12 @@ func Container(name string) EnvOption {
// - Expanded abbreviations do not participate in namespace resolution.
// - Abbreviation expansion is done instead of the container search for a matching identifier.
// - Containers follow C++ namespace resolution rules with searches from the most qualified name
-// to the least qualified name.
+//
+// to the least qualified name.
+//
// - Container references within the CEL program may be relative, and are resolved to fully
-// qualified names at either type-check time or program plan time, whichever comes first.
+//
+// qualified names at either type-check time or program plan time, whichever comes first.
//
// If there is ever a case where an identifier could be in both the container and as an
// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
@@ -216,7 +224,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption {
// environment by default.
//
// Note: This option must be specified after the CustomTypeProvider option when used together.
-func Types(addTypes ...interface{}) EnvOption {
+func Types(addTypes ...any) EnvOption {
return func(e *Env) (*Env, error) {
reg, isReg := e.provider.(ref.TypeRegistry)
if !isReg {
@@ -253,7 +261,7 @@ func Types(addTypes ...interface{}) EnvOption {
//
// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via
// extension or by re-using the same EnvOption with another NewEnv() call.
-func TypeDescs(descs ...interface{}) EnvOption {
+func TypeDescs(descs ...any) EnvOption {
return func(e *Env) (*Env, error) {
reg, isReg := e.provider.(ref.TypeRegistry)
if !isReg {
@@ -350,8 +358,8 @@ func Functions(funcs ...*functions.Overload) ProgramOption {
// variables with the same name provided to the Eval() call. If Globals is used in a Library with
// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
//
-// The vars value may either be an `interpreter.Activation` instance or a `map[string]interface{}`.
-func Globals(vars interface{}) ProgramOption {
+// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
+func Globals(vars any) ProgramOption {
return func(p *prog) (*prog, error) {
defaultVars, err := interpreter.NewActivation(vars)
if err != nil {
@@ -404,6 +412,9 @@ const (
// OptTrackCost enables the runtime cost calculation while validation and return cost within evalDetails
// cost calculation is available via func ActualCost()
OptTrackCost EvalOption = 1 << iota
+
+ // OptCheckStringFormat enables compile-time checking of string.format calls for syntax/cardinality.
+ OptCheckStringFormat EvalOption = 1 << iota
)
// EvalOptions sets one or more evaluation options which may affect the evaluation or Result.
@@ -534,6 +545,13 @@ func DefaultUTCTimeZone(enabled bool) EnvOption {
return features(featureDefaultUTCTimeZone, enabled)
}
+// OptionalTypes enables support for optional syntax and types in CEL. The optional value type makes
+// it possible to express whether variables have been provided, whether a result has been computed,
+// and in the future whether an object field path, map key value, or list index has a value.
+func OptionalTypes() EnvOption {
+ return Lib(optionalLibrary{})
+}
+
// features sets the given feature flags. See list of Feature constants above.
func features(flag int, enabled bool) EnvOption {
return func(e *Env) (*Env, error) {
@@ -541,3 +559,21 @@ func features(flag int, enabled bool) EnvOption {
return e, nil
}
}
+
+// ParserRecursionLimit adjusts the AST depth the parser will tolerate.
+// Defaults defined in the parser package.
+func ParserRecursionLimit(limit int) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.MaxRecursionDepth(limit))
+ return e, nil
+ }
+}
+
+// ParserExpressionSizeLimit adjusts the number of code points the expression parser is allowed to parse.
+// Defaults defined in the parser package.
+func ParserExpressionSizeLimit(limit int) EnvOption {
+ return func(e *Env) (*Env, error) {
+ e.prsrOpts = append(e.prsrOpts, parser.ExpressionSizeCodePointLimit(limit))
+ return e, nil
+ }
+}
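A rough usage sketch of the new options introduced above (OptionalTypes, ParserRecursionLimit, ParserExpressionSizeLimit); the expression and the limit values are illustrative only.

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// OptionalTypes enables the optional syntax and functions; the parser limits
	// guard against overly deep or large expressions. Limit values are illustrative.
	env, err := cel.NewEnv(
		cel.OptionalTypes(),
		cel.ParserRecursionLimit(32),
		cel.ParserExpressionSizeLimit(4096),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`optional.of("hello").orValue("fallback")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	fmt.Println(out, err)
}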
diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go
index 6219a4da58..a630f5bfa1 100644
--- a/vendor/github.com/google/cel-go/cel/program.go
+++ b/vendor/github.com/google/cel-go/cel/program.go
@@ -17,21 +17,20 @@ package cel
import (
"context"
"fmt"
- "math"
"sync"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// Program is an evaluable view of an Ast.
type Program interface {
// Eval returns the result of an evaluation of the Ast and environment against the input vars.
//
- // The vars value may either be an `interpreter.Activation` or a `map[string]interface{}`.
+ // The vars value may either be an `interpreter.Activation` or a `map[string]any`.
//
// If the `OptTrackState`, `OptTrackCost` or `OptExhaustiveEval` flags are used, the `details` response will
// be non-nil. Given this caveat on `details`, the return state from evaluation will be:
@@ -43,16 +42,16 @@ type Program interface {
// An unsuccessful evaluation is typically the result of a series of incompatible `EnvOption`
// or `ProgramOption` values used in the creation of the evaluation environment or executable
// program.
- Eval(interface{}) (ref.Val, *EvalDetails, error)
+ Eval(any) (ref.Val, *EvalDetails, error)
// ContextEval evaluates the program with a set of input variables and a context object in order
// to support cancellation and timeouts. This method must be used in conjunction with the
// InterruptCheckFrequency() option for cancellation interrupts to impact evaluation.
//
- // The vars value may either be an `interpreter.Activation` or `map[string]interface{}`.
+ // The vars value may either be an `interpreter.Activation` or `map[string]any`.
//
// The output contract for `ContextEval` is otherwise identical to the `Eval` method.
- ContextEval(context.Context, interface{}) (ref.Val, *EvalDetails, error)
+ ContextEval(context.Context, any) (ref.Val, *EvalDetails, error)
}
// NoVars returns an empty Activation.
@@ -65,7 +64,7 @@ func NoVars() interpreter.Activation {
//
// The `vars` value may either be an interpreter.Activation or any valid input to the
// interpreter.NewActivation call.
-func PartialVars(vars interface{},
+func PartialVars(vars any,
unknowns ...*interpreter.AttributePattern) (interpreter.PartialActivation, error) {
return interpreter.NewPartialActivation(vars, unknowns...)
}
@@ -207,6 +206,37 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) {
if len(p.regexOptimizations) > 0 {
decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
}
+ // Enable compile-time checking of syntax/cardinality for string.format calls.
+ if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat {
+ var isValidType func(id int64, validTypes ...*types.TypeValue) (bool, error)
+ if ast.IsChecked() {
+ isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) {
+ t, err := ExprTypeToType(ast.typeMap[id])
+ if err != nil {
+ return false, err
+ }
+ if t.kind == DynKind {
+ return true, nil
+ }
+ for _, vt := range validTypes {
+ k, err := typeValueToKind(vt)
+ if err != nil {
+ return false, err
+ }
+ if k == t.kind {
+ return true, nil
+ }
+ }
+ return false, nil
+ }
+ } else {
+ // if the AST isn't type-checked, short-circuit validation
+ isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) {
+ return true, nil
+ }
+ }
+ decorators = append(decorators, interpreter.InterpolateFormattedString(isValidType))
+ }
// Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
@@ -268,7 +298,7 @@ func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecor
}
// Eval implements the Program interface method.
-func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error) {
+func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
// Configure error recovery for unexpected panics during evaluation. Note, the use of named
// return values makes it possible to modify the error response during the recovery
// function.
@@ -287,11 +317,11 @@ func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error)
switch v := input.(type) {
case interpreter.Activation:
vars = v
- case map[string]interface{}:
+ case map[string]any:
vars = activationPool.Setup(v)
defer activationPool.Put(vars)
default:
- return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
}
if p.defaultVars != nil {
vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
@@ -307,7 +337,7 @@ func (p *prog) Eval(input interface{}) (v ref.Val, det *EvalDetails, err error)
}
// ContextEval implements the Program interface.
-func (p *prog) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
+func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
if ctx == nil {
return nil, nil, fmt.Errorf("context can not be nil")
}
@@ -318,22 +348,17 @@ func (p *prog) ContextEval(ctx context.Context, input interface{}) (ref.Val, *Ev
case interpreter.Activation:
vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
defer ctxActivationPool.Put(vars)
- case map[string]interface{}:
+ case map[string]any:
rawVars := activationPool.Setup(v)
defer activationPool.Put(rawVars)
vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
defer ctxActivationPool.Put(vars)
default:
- return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]interface{}, got: (%T)%v", input, input)
+ return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
}
return p.Eval(vars)
}
-// Cost implements the Coster interface method.
-func (p *prog) Cost() (min, max int64) {
- return estimateCost(p.interpretable)
-}
-
// progFactory is a helper alias for marking a program creation factory function.
type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
@@ -354,7 +379,7 @@ func newProgGen(factory progFactory) (Program, error) {
}
// Eval implements the Program interface method.
-func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
+func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
// The factory based Eval() differs from the standard evaluation model in that it generates a
// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
// results.
@@ -379,7 +404,7 @@ func (gen *progGen) Eval(input interface{}) (ref.Val, *EvalDetails, error) {
}
// ContextEval implements the Program interface method.
-func (gen *progGen) ContextEval(ctx context.Context, input interface{}) (ref.Val, *EvalDetails, error) {
+func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
if ctx == nil {
return nil, nil, fmt.Errorf("context can not be nil")
}
@@ -406,29 +431,6 @@ func (gen *progGen) ContextEval(ctx context.Context, input interface{}) (ref.Val
return v, det, nil
}
-// Cost implements the Coster interface method.
-func (gen *progGen) Cost() (min, max int64) {
- // Use an empty state value since no evaluation is performed.
- p, err := gen.factory(emptyEvalState, nil)
- if err != nil {
- return 0, math.MaxInt64
- }
- return estimateCost(p)
-}
-
-// EstimateCost returns the heuristic cost interval for the program.
-func EstimateCost(p Program) (min, max int64) {
- return estimateCost(p)
-}
-
-func estimateCost(i interface{}) (min, max int64) {
- c, ok := i.(interpreter.Coster)
- if !ok {
- return 0, math.MaxInt64
- }
- return c.Cost()
-}
-
type ctxEvalActivation struct {
parent interpreter.Activation
interrupt <-chan struct{}
@@ -438,7 +440,7 @@ type ctxEvalActivation struct {
// ResolveName implements the Activation interface method, but adds a special #interrupted variable
// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
-func (a *ctxEvalActivation) ResolveName(name string) (interface{}, bool) {
+func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
if name == "#interrupted" {
a.interruptCheckCount++
if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
@@ -461,7 +463,7 @@ func (a *ctxEvalActivation) Parent() interpreter.Activation {
func newCtxEvalActivationPool() *ctxEvalActivationPool {
return &ctxEvalActivationPool{
Pool: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &ctxEvalActivation{}
},
},
@@ -483,21 +485,21 @@ func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan s
}
type evalActivation struct {
- vars map[string]interface{}
- lazyVars map[string]interface{}
+ vars map[string]any
+ lazyVars map[string]any
}
// ResolveName looks up the value of the input variable name, if found.
//
// Lazy bindings may be supplied within the map-based input in either of the following forms:
-// - func() interface{}
+// - func() any
// - func() ref.Val
//
// The lazy binding will only be invoked once per evaluation.
//
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
// the ref.TypeAdapter configured in the environment.
-func (a *evalActivation) ResolveName(name string) (interface{}, bool) {
+func (a *evalActivation) ResolveName(name string) (any, bool) {
v, found := a.vars[name]
if !found {
return nil, false
@@ -510,7 +512,7 @@ func (a *evalActivation) ResolveName(name string) (interface{}, bool) {
lazy := obj()
a.lazyVars[name] = lazy
return lazy, true
- case func() interface{}:
+ case func() any:
if resolved, found := a.lazyVars[name]; found {
return resolved, true
}
@@ -530,8 +532,8 @@ func (a *evalActivation) Parent() interpreter.Activation {
func newEvalActivationPool() *evalActivationPool {
return &evalActivationPool{
Pool: sync.Pool{
- New: func() interface{} {
- return &evalActivation{lazyVars: make(map[string]interface{})}
+ New: func() any {
+ return &evalActivation{lazyVars: make(map[string]any)}
},
},
}
@@ -542,13 +544,13 @@ type evalActivationPool struct {
}
// Setup initializes a pooled Activation object with the map input.
-func (p *evalActivationPool) Setup(vars map[string]interface{}) *evalActivation {
+func (p *evalActivationPool) Setup(vars map[string]any) *evalActivation {
a := p.Pool.Get().(*evalActivation)
a.vars = vars
return a
}
-func (p *evalActivationPool) Put(value interface{}) {
+func (p *evalActivationPool) Put(value any) {
a := value.(*evalActivation)
for k := range a.lazyVars {
delete(a.lazyVars, k)
@@ -559,7 +561,7 @@ func (p *evalActivationPool) Put(value interface{}) {
var (
emptyEvalState = interpreter.NewEvalState()
- // activationPool is an internally managed pool of Activation values that wrap map[string]interface{} inputs
+ // activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
activationPool = newEvalActivationPool()
// ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
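The interface{}-to-any migration above leaves the Eval input contract unchanged; a small sketch, with invented names, of the lazy-binding form (func() any) described in the ResolveName comment.

package main

import (
	"fmt"
	"time"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker/decls"
)

func main() {
	env, err := cel.NewEnv(cel.Declarations(decls.NewVar("now", decls.Int)))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`now > 0`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	// Lazy bindings of the form func() any are resolved at most once per evaluation.
	out, _, err := prg.Eval(map[string]any{
		"now": func() any { return time.Now().Unix() },
	})
	fmt.Println(out, err)
}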
diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel
index bec40b6e69..1c6ddb7f7d 100644
--- a/vendor/github.com/google/cel-go/checker/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel
@@ -30,7 +30,7 @@ go_library(
"//common/types/pb:go_default_library",
"//common/types/ref:go_default_library",
"//parser:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
@@ -54,7 +54,7 @@ go_test(
"//test:go_default_library",
"//test/proto2pb:go_default_library",
"//test/proto3pb:go_default_library",
- "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go
index fcddb1b2c2..257cffecf6 100644
--- a/vendor/github.com/google/cel-go/checker/checker.go
+++ b/vendor/github.com/google/cel-go/checker/checker.go
@@ -23,6 +23,7 @@ import (
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types/ref"
"google.golang.org/protobuf/proto"
@@ -173,8 +174,8 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
// Rewrite the node to be a variable reference to the resolved fully-qualified
// variable name.
- c.setType(e, ident.GetIdent().Type)
- c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().Value))
+ c.setType(e, ident.GetIdent().GetType())
+ c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue()))
identName := ident.GetName()
e.ExprKind = &exprpb.Expr_IdentExpr{
IdentExpr: &exprpb.Expr_Ident{
@@ -185,9 +186,37 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
}
}
+ resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false)
+ if sel.TestOnly {
+ resultType = decls.Bool
+ }
+ c.setType(e, substitute(c.mappings, resultType, false))
+}
+
+func (c *checker) checkOptSelect(e *exprpb.Expr) {
+ // Collect metadata related to the opt select call packaged by the parser.
+ call := e.GetCallExpr()
+ operand := call.GetArgs()[0]
+ field := call.GetArgs()[1]
+ fieldName, isString := maybeUnwrapString(field)
+ if !isString {
+ c.errors.ReportError(c.location(field), "unsupported optional field selection: %v", field)
+ return
+ }
+
+ // Perform type-checking using the field selection logic.
+ resultType := c.checkSelectField(e, operand, fieldName, true)
+ c.setType(e, substitute(c.mappings, resultType, false))
+}
+
+func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *exprpb.Type {
// Interpret as field selection, first traversing down the operand.
- c.check(sel.GetOperand())
- targetType := substitute(c.mappings, c.getType(sel.GetOperand()), false)
+ c.check(operand)
+ operandType := substitute(c.mappings, c.getType(operand), false)
+
+ // If the target type is 'optional', unwrap it for the sake of this check.
+ targetType, isOpt := maybeUnwrapOptional(operandType)
+
// Assume error type by default as most types do not support field selection.
resultType := decls.Error
switch kindOf(targetType) {
@@ -199,7 +228,7 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
// Objects yield their field type declaration as the selection result type, but only if
// the field is defined.
messageType := targetType
- if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), sel.GetField()); found {
+ if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), field); found {
resultType = fieldType.Type
}
case kindTypeParam:
@@ -212,16 +241,17 @@ func (c *checker) checkSelect(e *exprpb.Expr) {
default:
// Dynamic / error values are treated as DYN type. Errors are handled this way as well
// in order to allow forward progress on the check.
- if isDynOrError(targetType) {
- resultType = decls.Dyn
- } else {
+ if !isDynOrError(targetType) {
c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType)
}
+ resultType = decls.Dyn
}
- if sel.TestOnly {
- resultType = decls.Bool
+
+ // If the target type was optional coming in, then the result must be optional going out.
+ if isOpt || optional {
+ return decls.NewOptionalType(resultType)
}
- c.setType(e, substitute(c.mappings, resultType, false))
+ return resultType
}
func (c *checker) checkCall(e *exprpb.Expr) {
@@ -229,15 +259,19 @@ func (c *checker) checkCall(e *exprpb.Expr) {
// please consider the impact on planner.go and consolidate implementations or mirror code
// as appropriate.
call := e.GetCallExpr()
- target := call.GetTarget()
- args := call.GetArgs()
fnName := call.GetFunction()
+ if fnName == operators.OptSelect {
+ c.checkOptSelect(e)
+ return
+ }
+ args := call.GetArgs()
// Traverse arguments.
for _, arg := range args {
c.check(arg)
}
+ target := call.GetTarget()
// Regular static call with simple name.
if target == nil {
// Check for the existence of the function.
@@ -359,6 +393,9 @@ func (c *checker) resolveOverload(
}
if resultType == nil {
+ for i, arg := range argTypes {
+ argTypes[i] = substitute(c.mappings, arg, true)
+ }
c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil)
resultType = decls.Error
return nil
@@ -369,16 +406,29 @@ func (c *checker) resolveOverload(
func (c *checker) checkCreateList(e *exprpb.Expr) {
create := e.GetListExpr()
- var elemType *exprpb.Type
- for _, e := range create.GetElements() {
+ var elemsType *exprpb.Type
+ optionalIndices := create.GetOptionalIndices()
+ optionals := make(map[int32]bool, len(optionalIndices))
+ for _, optInd := range optionalIndices {
+ optionals[optInd] = true
+ }
+ for i, e := range create.GetElements() {
c.check(e)
- elemType = c.joinTypes(c.location(e), elemType, c.getType(e))
+ elemType := c.getType(e)
+ if optionals[int32(i)] {
+ var isOptional bool
+ elemType, isOptional = maybeUnwrapOptional(elemType)
+ if !isOptional && !isDyn(elemType) {
+ c.errors.typeMismatch(c.location(e), decls.NewOptionalType(elemType), elemType)
+ }
+ }
+ elemsType = c.joinTypes(c.location(e), elemsType, elemType)
}
- if elemType == nil {
+ if elemsType == nil {
// If the list is empty, assign free type var to elem type.
- elemType = c.newTypeVar()
+ elemsType = c.newTypeVar()
}
- c.setType(e, decls.NewListType(elemType))
+ c.setType(e, decls.NewListType(elemsType))
}
func (c *checker) checkCreateStruct(e *exprpb.Expr) {
@@ -392,22 +442,31 @@ func (c *checker) checkCreateStruct(e *exprpb.Expr) {
func (c *checker) checkCreateMap(e *exprpb.Expr) {
mapVal := e.GetStructExpr()
- var keyType *exprpb.Type
- var valueType *exprpb.Type
+ var mapKeyType *exprpb.Type
+ var mapValueType *exprpb.Type
for _, ent := range mapVal.GetEntries() {
key := ent.GetMapKey()
c.check(key)
- keyType = c.joinTypes(c.location(key), keyType, c.getType(key))
-
- c.check(ent.Value)
- valueType = c.joinTypes(c.location(ent.Value), valueType, c.getType(ent.Value))
+ mapKeyType = c.joinTypes(c.location(key), mapKeyType, c.getType(key))
+
+ val := ent.GetValue()
+ c.check(val)
+ valType := c.getType(val)
+ if ent.GetOptionalEntry() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(c.location(val), decls.NewOptionalType(valType), valType)
+ }
+ }
+ mapValueType = c.joinTypes(c.location(val), mapValueType, valType)
}
- if keyType == nil {
+ if mapKeyType == nil {
// If the map is empty, assign free type variables to typeKey and value type.
- keyType = c.newTypeVar()
- valueType = c.newTypeVar()
+ mapKeyType = c.newTypeVar()
+ mapValueType = c.newTypeVar()
}
- c.setType(e, decls.NewMapType(keyType, valueType))
+ c.setType(e, decls.NewMapType(mapKeyType, mapValueType))
}
func (c *checker) checkCreateMessage(e *exprpb.Expr) {
@@ -449,15 +508,21 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) {
c.check(value)
fieldType := decls.Error
- if t, found := c.lookupFieldType(
- c.locationByID(ent.GetId()),
- messageType.GetMessageType(),
- field); found {
- fieldType = t.Type
+ ft, found := c.lookupFieldType(c.locationByID(ent.GetId()), messageType.GetMessageType(), field)
+ if found {
+ fieldType = ft.Type
+ }
+
+ valType := c.getType(value)
+ if ent.GetOptionalEntry() {
+ var isOptional bool
+ valType, isOptional = maybeUnwrapOptional(valType)
+ if !isOptional && !isDyn(valType) {
+ c.errors.typeMismatch(c.location(value), decls.NewOptionalType(valType), valType)
+ }
}
- if !c.isAssignable(fieldType, c.getType(value)) {
- c.errors.fieldTypeMismatch(
- c.locationByID(ent.Id), field, fieldType, c.getType(value))
+ if !c.isAssignable(fieldType, valType) {
+ c.errors.fieldTypeMismatch(c.locationByID(ent.Id), field, fieldType, valType)
}
}
}
diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go
index 5ccf904b85..ef58df766b 100644
--- a/vendor/github.com/google/cel-go/checker/cost.go
+++ b/vendor/github.com/google/cel-go/checker/cost.go
@@ -92,7 +92,10 @@ func (e astNode) ComputedSize() *SizeEstimate {
case *exprpb.Expr_ConstExpr:
switch ck := ek.ConstExpr.GetConstantKind().(type) {
case *exprpb.Constant_StringValue:
- v = uint64(len(ck.StringValue))
+ // converting to runes here is an O(n) operation, but
+ // this is consistent with how size is computed at runtime,
+ // and how the language definition defines string size
+ v = uint64(len([]rune(ck.StringValue)))
case *exprpb.Constant_BytesValue:
v = uint64(len(ck.BytesValue))
case *exprpb.Constant_BoolValue, *exprpb.Constant_DoubleValue, *exprpb.Constant_DurationValue,
@@ -258,6 +261,8 @@ type coster struct {
computedSizes map[int64]SizeEstimate
checkedExpr *exprpb.CheckedExpr
estimator CostEstimator
+ // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations.
+ presenceTestCost CostEstimate
}
// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
@@ -280,16 +285,39 @@ func (vs iterRangeScopes) peek(varName string) (int64, bool) {
return 0, false
}
+// CostOption configures flags which affect cost computations.
+type CostOption func(*coster) error
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+// Defaults to presence tests having a cost of one.
+func PresenceTestHasCost(hasCost bool) CostOption {
+ return func(c *coster) error {
+ if hasCost {
+ c.presenceTestCost = selectAndIdentCost
+ return nil
+ }
+ c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
+ return nil
+ }
+}
+
// Cost estimates the cost of the parsed and type checked CEL expression.
-func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator) CostEstimate {
- c := coster{
- checkedExpr: checker,
- estimator: estimator,
- exprPath: map[int64][]string{},
- iterRanges: map[string][]int64{},
- computedSizes: map[int64]SizeEstimate{},
+func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
+ c := &coster{
+ checkedExpr: checker,
+ estimator: estimator,
+ exprPath: map[int64][]string{},
+ iterRanges: map[string][]int64{},
+ computedSizes: map[int64]SizeEstimate{},
+ presenceTestCost: CostEstimate{Min: 1, Max: 1},
+ }
+ for _, opt := range opts {
+ err := opt(c)
+ if err != nil {
+ return CostEstimate{}, err
+ }
}
- return c.cost(checker.GetExpr())
+ return c.cost(checker.GetExpr()), nil
}
func (c *coster) cost(e *exprpb.Expr) CostEstimate {
@@ -340,6 +368,12 @@ func (c *coster) costSelect(e *exprpb.Expr) CostEstimate {
sel := e.GetSelectExpr()
var sum CostEstimate
if sel.GetTestOnly() {
+ // recurse, but do not add any cost
+ // this is equivalent to how evalTestOnly increments the runtime cost counter
+ // but does not add any additional cost for the qualifier, except here we do
+ // the reverse (ident adds cost)
+ sum = sum.Add(c.presenceTestCost)
+ sum = sum.Add(c.cost(sel.GetOperand()))
return sum
}
sum = sum.Add(c.cost(sel.GetOperand()))
@@ -503,6 +537,11 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
}
switch overloadID {
// O(n) functions
+ case overloads.ExtFormatString:
+ if target != nil {
+ // ResultSize not calculated because we can't bound the max size.
+ return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+ }
case overloads.StringToBytes:
if len(args) == 1 {
sz := c.sizeEstimate(args[0])
@@ -515,6 +554,12 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
// ResultSize min is when 4 bytes convert to 1 char.
return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
}
+ case overloads.ExtQuoteString:
+ if len(args) == 1 {
+ sz := c.sizeEstimate(args[0])
+ // ResultSize max is when each char is escaped. 2 quote chars always added.
+ return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
+ }
case overloads.StartsWithString, overloads.EndsWithString:
if len(args) == 1 {
return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
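Because Cost now returns an error and accepts CostOption values, callers would be updated along these lines; the package name, helper, estimator, and checked expression below are placeholders.

package costexample

import (
	"fmt"

	"github.com/google/cel-go/checker"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// estimate is a hypothetical caller: checkedExpr comes from a successful type-check
// and est is any CostEstimator implementation supplied by the application.
func estimate(checkedExpr *exprpb.CheckedExpr, est checker.CostEstimator) error {
	costEst, err := checker.Cost(checkedExpr, est, checker.PresenceTestHasCost(false))
	if err != nil {
		return err
	}
	fmt.Printf("estimated cost range: [%d, %d]\n", costEst.Min, costEst.Max)
	return nil
}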
diff --git a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
index 5a24f1da80..9384be4507 100644
--- a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
@@ -13,7 +13,7 @@ go_library(
],
importpath = "github.com/google/cel-go/checker/decls",
deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
],
diff --git a/vendor/github.com/google/cel-go/checker/decls/decls.go b/vendor/github.com/google/cel-go/checker/decls/decls.go
index 88a99282d9..0d91bef514 100644
--- a/vendor/github.com/google/cel-go/checker/decls/decls.go
+++ b/vendor/github.com/google/cel-go/checker/decls/decls.go
@@ -16,9 +16,9 @@
package decls
import (
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
emptypb "google.golang.org/protobuf/types/known/emptypb"
structpb "google.golang.org/protobuf/types/known/structpb"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
var (
@@ -64,6 +64,12 @@ func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
ParameterTypes: paramTypes}}}
}
+// NewOptionalType constructs an abstract type indicating that the parameterized type
+// may be contained within the object.
+func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
+ return NewAbstractType("optional", paramType)
+}
+
// NewFunctionType creates a function invocation contract, typically only used
// by type-checking steps after overload resolution.
func NewFunctionType(resultType *exprpb.Type,
diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go
index c7eeb04eee..be89d2d68d 100644
--- a/vendor/github.com/google/cel-go/checker/env.go
+++ b/vendor/github.com/google/cel-go/checker/env.go
@@ -226,7 +226,7 @@ func (e *Env) setFunction(decl *exprpb.Decl) []errorMsg {
newOverloads := []*exprpb.Decl_FunctionDecl_Overload{}
for _, overload := range overloads {
existing, found := existingOverloads[overload.GetOverloadId()]
- if !found || !proto.Equal(existing, overload) {
+ if !found || !overloadsEqual(existing, overload) {
newOverloads = append(newOverloads, overload)
}
}
@@ -264,6 +264,31 @@ func (e *Env) isOverloadDisabled(overloadID string) bool {
return found
}
+// overloadsEqual returns whether two overloads have identical signatures.
+//
+// Type parameter names are ignored as they may be specified in any order and have no bearing on overload
+// equivalence.
+func overloadsEqual(o1, o2 *exprpb.Decl_FunctionDecl_Overload) bool {
+ return o1.GetOverloadId() == o2.GetOverloadId() &&
+ o1.GetIsInstanceFunction() == o2.GetIsInstanceFunction() &&
+ paramsEqual(o1.GetParams(), o2.GetParams()) &&
+ proto.Equal(o1.GetResultType(), o2.GetResultType())
+}
+
+// paramsEqual returns whether two lists have equal length and all types are equal
+func paramsEqual(p1, p2 []*exprpb.Type) bool {
+ if len(p1) != len(p2) {
+ return false
+ }
+ for i, a := range p1 {
+ b := p2[i]
+ if !proto.Equal(a, b) {
+ return false
+ }
+ }
+ return true
+}
+
// sanitizeFunction replaces well-known types referenced by message name with their equivalent
// CEL built-in type instances.
func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl {
diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go
index e2ed35be83..0cecc5210d 100644
--- a/vendor/github.com/google/cel-go/checker/printer.go
+++ b/vendor/github.com/google/cel-go/checker/printer.go
@@ -26,7 +26,7 @@ type semanticAdorner struct {
var _ debug.Adorner = &semanticAdorner{}
-func (a *semanticAdorner) GetMetadata(elem interface{}) string {
+func (a *semanticAdorner) GetMetadata(elem any) string {
result := ""
e, isExpr := elem.(*exprpb.Expr)
if !isExpr {
diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go
index 5b48a9046a..e64337ba44 100644
--- a/vendor/github.com/google/cel-go/checker/standard.go
+++ b/vendor/github.com/google/cel-go/checker/standard.go
@@ -287,6 +287,8 @@ func init() {
decls.NewInstanceOverload(overloads.EndsWithString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.Matches,
+ decls.NewOverload(overloads.Matches,
+ []*exprpb.Type{decls.String, decls.String}, decls.Bool),
decls.NewInstanceOverload(overloads.MatchesString,
[]*exprpb.Type{decls.String, decls.String}, decls.Bool)),
decls.NewFunction(overloads.StartsWith,
diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go
index 8683797d5b..28d21c9d92 100644
--- a/vendor/github.com/google/cel-go/checker/types.go
+++ b/vendor/github.com/google/cel-go/checker/types.go
@@ -90,6 +90,14 @@ func FormatCheckedType(t *exprpb.Type) string {
return "!error!"
case kindTypeParam:
return t.GetTypeParam()
+ case kindAbstract:
+ at := t.GetAbstractType()
+ params := at.GetParameterTypes()
+ paramStrs := make([]string, len(params))
+ for i, p := range params {
+ paramStrs[i] = FormatCheckedType(p)
+ }
+ return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", "))
}
return t.String()
}
@@ -110,12 +118,39 @@ func isDyn(t *exprpb.Type) bool {
// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
func isDynOrError(t *exprpb.Type) bool {
- switch kindOf(t) {
- case kindError:
- return true
- default:
- return isDyn(t)
+ return isError(t) || isDyn(t)
+}
+
+func isError(t *exprpb.Type) bool {
+ return kindOf(t) == kindError
+}
+
+func isOptional(t *exprpb.Type) bool {
+ if kindOf(t) == kindAbstract {
+ at := t.GetAbstractType()
+ return at.GetName() == "optional"
+ }
+ return false
+}
+
+func maybeUnwrapOptional(t *exprpb.Type) (*exprpb.Type, bool) {
+ if isOptional(t) {
+ at := t.GetAbstractType()
+ return at.GetParameterTypes()[0], true
+ }
+ return t, false
+}
+
+func maybeUnwrapString(e *exprpb.Expr) (string, bool) {
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ literal := e.GetConstExpr()
+ switch literal.GetConstantKind().(type) {
+ case *exprpb.Constant_StringValue:
+ return literal.GetStringValue(), true
+ }
}
+ return "", false
}
// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
@@ -236,7 +271,7 @@ func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool {
// substitution for t1, and whether t2 has a type substitution in mapping m.
//
// The type t2 is a valid substitution for t1 if any of the following statements is true
-// - t2 has a type substitition (t2sub) equal to t1
+// - t2 has a type substitution (t2sub) equal to t1
// - t2 has a type substitution (t2sub) assignable to t1
// - t2 does not occur within t1.
func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub bool) {
diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel
index a0058aebe0..d6165b13af 100644
--- a/vendor/github.com/google/cel-go/common/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/BUILD.bazel
@@ -17,7 +17,7 @@ go_library(
importpath = "github.com/google/cel-go/common",
deps = [
"//common/runes:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_x_text//width:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
index 18142d94ef..3f3f078871 100644
--- a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel
@@ -12,7 +12,7 @@ go_library(
],
importpath = "github.com/google/cel-go/common/containers",
deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
@@ -26,6 +26,6 @@ go_test(
":go_default_library",
],
deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
index cf5c5d2467..1f029839c7 100644
--- a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
@@ -13,6 +13,6 @@ go_library(
importpath = "github.com/google/cel-go/common/debug",
deps = [
"//common:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go
index bec885424b..5dab156ef3 100644
--- a/vendor/github.com/google/cel-go/common/debug/debug.go
+++ b/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -29,7 +29,7 @@ import (
// representation of an expression.
type Adorner interface {
// GetMetadata for the input context.
- GetMetadata(ctx interface{}) string
+ GetMetadata(ctx any) string
}
// Writer manages writing expressions to an internal string.
@@ -46,7 +46,7 @@ type emptyDebugAdorner struct {
var emptyAdorner Adorner = &emptyDebugAdorner{}
-func (a *emptyDebugAdorner) GetMetadata(e interface{}) string {
+func (a *emptyDebugAdorner) GetMetadata(e any) string {
return ""
}
@@ -170,6 +170,9 @@ func (w *debugWriter) appendObject(obj *exprpb.Expr_CreateStruct) {
w.append(",")
w.appendLine()
}
+ if entry.GetOptionalEntry() {
+ w.append("?")
+ }
w.append(entry.GetFieldKey())
w.append(":")
w.Buffer(entry.GetValue())
@@ -191,6 +194,9 @@ func (w *debugWriter) appendMap(obj *exprpb.Expr_CreateStruct) {
w.append(",")
w.appendLine()
}
+ if entry.GetOptionalEntry() {
+ w.append("?")
+ }
w.Buffer(entry.GetMapKey())
w.append(":")
w.Buffer(entry.GetValue())
@@ -269,7 +275,7 @@ func (w *debugWriter) append(s string) {
w.buffer.WriteString(s)
}
-func (w *debugWriter) appendFormat(f string, args ...interface{}) {
+func (w *debugWriter) appendFormat(f string, args ...any) {
w.append(fmt.Sprintf(f, args...))
}
@@ -280,7 +286,7 @@ func (w *debugWriter) doIndent() {
}
}
-func (w *debugWriter) adorn(e interface{}) {
+func (w *debugWriter) adorn(e any) {
w.append(w.adorner.GetMetadata(e))
}
diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go
index daebba8609..1565085ab9 100644
--- a/vendor/github.com/google/cel-go/common/errors.go
+++ b/vendor/github.com/google/cel-go/common/errors.go
@@ -38,7 +38,7 @@ func NewErrors(source Source) *Errors {
}
// ReportError records an error at a source location.
-func (e *Errors) ReportError(l Location, format string, args ...interface{}) {
+func (e *Errors) ReportError(l Location, format string, args ...any) {
e.numErrors++
if e.numErrors > e.maxErrorsToReport {
return
diff --git a/vendor/github.com/google/cel-go/common/operators/operators.go b/vendor/github.com/google/cel-go/common/operators/operators.go
index fa25dfb7f0..f9b39bda3f 100644
--- a/vendor/github.com/google/cel-go/common/operators/operators.go
+++ b/vendor/github.com/google/cel-go/common/operators/operators.go
@@ -37,6 +37,8 @@ const (
Modulo = "_%_"
Negate = "-_"
Index = "_[_]"
+ OptIndex = "_[?_]"
+ OptSelect = "_?._"
// Macros, must have a valid identifier.
Has = "has"
@@ -99,6 +101,8 @@ var (
LogicalNot: {displayName: "!", precedence: 2, arity: 1},
Negate: {displayName: "-", precedence: 2, arity: 1},
Index: {displayName: "", precedence: 1, arity: 2},
+ OptIndex: {displayName: "", precedence: 1, arity: 2},
+ OptSelect: {displayName: "", precedence: 1, arity: 2},
}
)
diff --git a/vendor/github.com/google/cel-go/common/overloads/overloads.go b/vendor/github.com/google/cel-go/common/overloads/overloads.go
index 9ebaf6fabf..9d50f4367b 100644
--- a/vendor/github.com/google/cel-go/common/overloads/overloads.go
+++ b/vendor/github.com/google/cel-go/common/overloads/overloads.go
@@ -148,6 +148,11 @@ const (
StartsWith = "startsWith"
)
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtQuoteString = "strings_quote"
+)
+
// String function overload names.
const (
ContainsString = "contains_string"
@@ -156,6 +161,11 @@ const (
StartsWithString = "starts_with_string"
)
+// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations.
+const (
+ ExtFormatString = "string_format"
+)
+
// Time-based functions.
const (
TimeGetFullYear = "getFullYear"
diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel
index 5f1b1cd1fd..89c4feacbf 100644
--- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel
@@ -22,6 +22,7 @@ go_library(
"map.go",
"null.go",
"object.go",
+ "optional.go",
"overflow.go",
"provider.go",
"string.go",
@@ -38,10 +39,8 @@ go_library(
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"@com_github_stoewer_go_strcase//:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
- "@org_golang_google_genproto//googleapis/rpc/status:go_default_library",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
"@org_golang_google_protobuf//encoding/protojson:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
@@ -68,6 +67,7 @@ go_test(
"map_test.go",
"null_test.go",
"object_test.go",
+ "optional_test.go",
"provider_test.go",
"string_test.go",
"timestamp_test.go",
@@ -80,7 +80,7 @@ go_test(
"//common/types/ref:go_default_library",
"//test:go_default_library",
"//test/proto3pb:test_all_types_go_proto",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//encoding/protojson:go_default_library",
"@org_golang_google_protobuf//types/known/anypb:go_default_library",
"@org_golang_google_protobuf//types/known/durationpb:go_default_library",
diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go
index 1b55ba9529..a634ecc287 100644
--- a/vendor/github.com/google/cel-go/common/types/bool.go
+++ b/vendor/github.com/google/cel-go/common/types/bool.go
@@ -62,7 +62,7 @@ func (b Bool) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (b Bool) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Bool:
return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
@@ -114,6 +114,11 @@ func (b Bool) Equal(other ref.Val) ref.Val {
return Bool(ok && b == otherBool)
}
+// IsZeroValue returns true if the boolean value is false.
+func (b Bool) IsZeroValue() bool {
+ return b == False
+}
+
// Negate implements the traits.Negater interface method.
func (b Bool) Negate() ref.Val {
return !b
@@ -125,7 +130,7 @@ func (b Bool) Type() ref.Type {
}
// Value implements the ref.Val interface method.
-func (b Bool) Value() interface{} {
+func (b Bool) Value() any {
return bool(b)
}
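
The IsZeroValue methods added here and to the other value types below form a generic zero-value test. A hedged sketch, assuming cel-go's traits package exposes a Zeroer interface declaring IsZeroValue() bool (not shown in this patch):

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
        "github.com/google/cel-go/common/types/ref"
        "github.com/google/cel-go/common/types/traits"
    )

    func main() {
        var v ref.Val = types.Bool(false)
        // Assumption: traits.Zeroer is the interface satisfied by the new methods.
        if z, ok := v.(traits.Zeroer); ok {
            fmt.Println(z.IsZeroValue()) // true, Bool(false) is the zero value
        }
    }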
diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go
index 3575717ec7..bef190759f 100644
--- a/vendor/github.com/google/cel-go/common/types/bytes.go
+++ b/vendor/github.com/google/cel-go/common/types/bytes.go
@@ -63,7 +63,7 @@ func (b Bytes) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (b Bytes) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Array, reflect.Slice:
return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil
@@ -116,6 +116,11 @@ func (b Bytes) Equal(other ref.Val) ref.Val {
return Bool(ok && bytes.Equal(b, otherBytes))
}
+// IsZeroValue returns true if the byte array is empty.
+func (b Bytes) IsZeroValue() bool {
+ return len(b) == 0
+}
+
// Size implements the traits.Sizer interface method.
func (b Bytes) Size() ref.Val {
return Int(len(b))
@@ -127,6 +132,6 @@ func (b Bytes) Type() ref.Type {
}
// Value implements the ref.Val interface method.
-func (b Bytes) Value() interface{} {
+func (b Bytes) Value() any {
return []byte(b)
}
diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go
index a6ec52a0f9..bda9f31a6b 100644
--- a/vendor/github.com/google/cel-go/common/types/double.go
+++ b/vendor/github.com/google/cel-go/common/types/double.go
@@ -78,7 +78,7 @@ func (d Double) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (d Double) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Float32:
v := float32(d)
@@ -134,13 +134,13 @@ func (d Double) ConvertToType(typeVal ref.Type) ref.Val {
case IntType:
i, err := doubleToInt64Checked(float64(d))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(i)
case UintType:
i, err := doubleToUint64Checked(float64(d))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(i)
case DoubleType:
@@ -182,6 +182,11 @@ func (d Double) Equal(other ref.Val) ref.Val {
}
}
+// IsZeroValue returns true if the double value is 0.0
+func (d Double) IsZeroValue() bool {
+ return float64(d) == 0.0
+}
+
// Multiply implements traits.Multiplier.Multiply.
func (d Double) Multiply(other ref.Val) ref.Val {
otherDouble, ok := other.(Double)
@@ -211,6 +216,6 @@ func (d Double) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (d Double) Value() interface{} {
+func (d Double) Value() any {
return float64(d)
}
diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go
index 418349fa6c..c90ac1bee9 100644
--- a/vendor/github.com/google/cel-go/common/types/duration.go
+++ b/vendor/github.com/google/cel-go/common/types/duration.go
@@ -57,14 +57,14 @@ func (d Duration) Add(other ref.Val) ref.Val {
dur2 := other.(Duration)
val, err := addDurationChecked(d.Duration, dur2.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return durationOf(val)
case TimestampType:
ts := other.(Timestamp).Time
val, err := addTimeDurationChecked(ts, d.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return timestampOf(val)
}
@@ -90,7 +90,7 @@ func (d Duration) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (d Duration) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the duration is already assignable to the desired type return it.
if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) {
return d.Duration, nil
@@ -138,11 +138,16 @@ func (d Duration) Equal(other ref.Val) ref.Val {
return Bool(ok && d.Duration == otherDur.Duration)
}
+// IsZeroValue returns true if the duration value is zero
+func (d Duration) IsZeroValue() bool {
+ return d.Duration == 0
+}
+
// Negate implements traits.Negater.Negate.
func (d Duration) Negate() ref.Val {
val, err := negateDurationChecked(d.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return durationOf(val)
}
@@ -165,7 +170,7 @@ func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
}
val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return durationOf(val)
}
@@ -176,7 +181,7 @@ func (d Duration) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (d Duration) Value() interface{} {
+func (d Duration) Value() any {
return d.Duration
}
diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go
index 93d79cdcbc..b4874d9d4d 100644
--- a/vendor/github.com/google/cel-go/common/types/err.go
+++ b/vendor/github.com/google/cel-go/common/types/err.go
@@ -22,6 +22,12 @@ import (
"github.com/google/cel-go/common/types/ref"
)
+// Error interface which allows types.Err values to be treated as error values.
+type Error interface {
+ error
+ ref.Val
+}
+
// Err type which extends the built-in go error and implements ref.Val.
type Err struct {
error
@@ -51,7 +57,7 @@ var (
// NewErr creates a new Err described by the format string and args.
// TODO: Audit the use of this function and standardize the error messages and codes.
-func NewErr(format string, args ...interface{}) ref.Val {
+func NewErr(format string, args ...any) ref.Val {
return &Err{fmt.Errorf(format, args...)}
}
@@ -62,7 +68,7 @@ func NoSuchOverloadErr() ref.Val {
// UnsupportedRefValConversionErr returns a types.NewErr instance with a no such conversion
// message that indicates that the native value could not be converted to a CEL ref.Val.
-func UnsupportedRefValConversionErr(val interface{}) ref.Val {
+func UnsupportedRefValConversionErr(val any) ref.Val {
return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
}
@@ -74,20 +80,20 @@ func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
// ValOrErr either returns the existing error or creates a new one.
// TODO: Audit the use of this function and standardize the error messages and codes.
-func ValOrErr(val ref.Val, format string, args ...interface{}) ref.Val {
+func ValOrErr(val ref.Val, format string, args ...any) ref.Val {
if val == nil || !IsUnknownOrError(val) {
return NewErr(format, args...)
}
return val
}
-// wrapErr wraps an existing Go error value into a CEL Err value.
-func wrapErr(err error) ref.Val {
+// WrapErr wraps an existing Go error value into a CEL Err value.
+func WrapErr(err error) ref.Val {
return &Err{error: err}
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (e *Err) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (e *Err) ConvertToNative(typeDesc reflect.Type) (any, error) {
return nil, e.error
}
@@ -114,10 +120,15 @@ func (e *Err) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (e *Err) Value() interface{} {
+func (e *Err) Value() any {
return e.error
}
+// Is implements errors.Is.
+func (e *Err) Is(target error) bool {
+ return e.error.Error() == target.Error()
+}
+
// IsError returns whether the input element ref.Type or ref.Val is equal to
// the ErrType singleton.
func IsError(val ref.Val) bool {
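
A short sketch, not from the patch, of the newly exported WrapErr helper together with the new Err.Is method, which matches on the error message; the canonical import path github.com/google/cel-go/common/types is assumed.

    package main

    import (
        "errors"
        "fmt"

        "github.com/google/cel-go/common/types"
    )

    func main() {
        wrapped := types.WrapErr(fmt.Errorf("index out of range"))
        if celErr, ok := wrapped.(*types.Err); ok {
            // Err.Is compares Error() strings, so errors.Is succeeds here.
            fmt.Println(errors.Is(celErr, errors.New("index out of range"))) // true
        }
    }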
diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go
index 95f25dcd80..f5a9511c8d 100644
--- a/vendor/github.com/google/cel-go/common/types/int.go
+++ b/vendor/github.com/google/cel-go/common/types/int.go
@@ -66,7 +66,7 @@ func (i Int) Add(other ref.Val) ref.Val {
}
val, err := addInt64Checked(int64(i), int64(otherInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -89,7 +89,7 @@ func (i Int) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (i Int) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Int, reflect.Int32:
// Enums are also mapped as int32 derivations.
@@ -176,7 +176,7 @@ func (i Int) ConvertToType(typeVal ref.Type) ref.Val {
case UintType:
u, err := int64ToUint64Checked(int64(i))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(u)
case DoubleType:
@@ -204,7 +204,7 @@ func (i Int) Divide(other ref.Val) ref.Val {
}
val, err := divideInt64Checked(int64(i), int64(otherInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -226,6 +226,11 @@ func (i Int) Equal(other ref.Val) ref.Val {
}
}
+// IsZeroValue returns true if the integer is equal to 0
+func (i Int) IsZeroValue() bool {
+ return i == IntZero
+}
+
// Modulo implements traits.Modder.Modulo.
func (i Int) Modulo(other ref.Val) ref.Val {
otherInt, ok := other.(Int)
@@ -234,7 +239,7 @@ func (i Int) Modulo(other ref.Val) ref.Val {
}
val, err := moduloInt64Checked(int64(i), int64(otherInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -247,7 +252,7 @@ func (i Int) Multiply(other ref.Val) ref.Val {
}
val, err := multiplyInt64Checked(int64(i), int64(otherInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -256,7 +261,7 @@ func (i Int) Multiply(other ref.Val) ref.Val {
func (i Int) Negate() ref.Val {
val, err := negateInt64Checked(int64(i))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -269,7 +274,7 @@ func (i Int) Subtract(subtrahend ref.Val) ref.Val {
}
val, err := subtractInt64Checked(int64(i), int64(subtraInt))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(val)
}
@@ -280,7 +285,7 @@ func (i Int) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (i Int) Value() interface{} {
+func (i Int) Value() any {
return int64(i)
}
diff --git a/vendor/github.com/google/cel-go/common/types/iterator.go b/vendor/github.com/google/cel-go/common/types/iterator.go
index 4906627783..9f224ad4ff 100644
--- a/vendor/github.com/google/cel-go/common/types/iterator.go
+++ b/vendor/github.com/google/cel-go/common/types/iterator.go
@@ -34,7 +34,7 @@ var (
// interpreter.
type baseIterator struct{}
-func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (any, error) {
return nil, fmt.Errorf("type conversion on iterators not supported")
}
@@ -50,6 +50,6 @@ func (*baseIterator) Type() ref.Type {
return IteratorType
}
-func (*baseIterator) Value() interface{} {
+func (*baseIterator) Value() any {
return nil
}
diff --git a/vendor/github.com/google/cel-go/common/types/json_value.go b/vendor/github.com/google/cel-go/common/types/json_value.go
index cd63b51944..13a4efe7ad 100644
--- a/vendor/github.com/google/cel-go/common/types/json_value.go
+++ b/vendor/github.com/google/cel-go/common/types/json_value.go
@@ -25,4 +25,5 @@ var (
jsonValueType = reflect.TypeOf(&structpb.Value{})
jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
jsonStructType = reflect.TypeOf(&structpb.Struct{})
+ jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
)
diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go
index 7230f7ea12..de5f2099bf 100644
--- a/vendor/github.com/google/cel-go/common/types/list.go
+++ b/vendor/github.com/google/cel-go/common/types/list.go
@@ -17,12 +17,14 @@ package types
import (
"fmt"
"reflect"
+ "strings"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
)
@@ -40,13 +42,13 @@ var (
// NewDynamicList returns a traits.Lister with heterogenous elements.
// value should be an array of "native" types, i.e. any type that
// NativeToValue() can convert to a ref.Val.
-func NewDynamicList(adapter ref.TypeAdapter, value interface{}) traits.Lister {
+func NewDynamicList(adapter ref.TypeAdapter, value any) traits.Lister {
refValue := reflect.ValueOf(value)
return &baseList{
TypeAdapter: adapter,
value: value,
size: refValue.Len(),
- get: func(i int) interface{} {
+ get: func(i int) any {
return refValue.Index(i).Interface()
},
}
@@ -58,7 +60,7 @@ func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister {
TypeAdapter: adapter,
value: elems,
size: len(elems),
- get: func(i int) interface{} { return elems[i] },
+ get: func(i int) any { return elems[i] },
}
}
@@ -70,7 +72,7 @@ func NewRefValList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister {
TypeAdapter: adapter,
value: elems,
size: len(elems),
- get: func(i int) interface{} { return elems[i] },
+ get: func(i int) any { return elems[i] },
}
}
@@ -80,7 +82,7 @@ func NewProtoList(adapter ref.TypeAdapter, list protoreflect.List) traits.Lister
TypeAdapter: adapter,
value: list,
size: list.Len(),
- get: func(i int) interface{} { return list.Get(i).Interface() },
+ get: func(i int) any { return list.Get(i).Interface() },
}
}
@@ -91,22 +93,25 @@ func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister {
TypeAdapter: adapter,
value: l,
size: len(vals),
- get: func(i int) interface{} { return vals[i] },
+ get: func(i int) any { return vals[i] },
}
}
// NewMutableList creates a new mutable list whose internal state can be modified.
func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister {
var mutableValues []ref.Val
- return &mutableList{
+ l := &mutableList{
baseList: &baseList{
TypeAdapter: adapter,
value: mutableValues,
size: 0,
- get: func(i int) interface{} { return mutableValues[i] },
},
mutableValues: mutableValues,
}
+ l.get = func(i int) any {
+ return l.mutableValues[i]
+ }
+ return l
}
// baseList points to a list containing elements of any type.
@@ -114,7 +119,7 @@ func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister {
// The `ref.TypeAdapter` enables native type to CEL type conversions.
type baseList struct {
ref.TypeAdapter
- value interface{}
+ value any
// size indicates the number of elements within the list.
// Since objects are immutable the size of a list is static.
@@ -122,7 +127,7 @@ type baseList struct {
// get returns a value at the specified integer index.
// The index is guaranteed to be checked against the list index range.
- get func(int) interface{}
+ get func(int) any
}
// Add implements the traits.Adder interface method.
@@ -157,7 +162,7 @@ func (l *baseList) Contains(elem ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (l *baseList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the underlying list value is assignable to the reflected type return it.
if reflect.TypeOf(l.value).AssignableTo(typeDesc) {
return l.value, nil
@@ -240,7 +245,7 @@ func (l *baseList) Equal(other ref.Val) ref.Val {
// Get implements the traits.Indexer interface method.
func (l *baseList) Get(index ref.Val) ref.Val {
- ind, err := indexOrError(index)
+ ind, err := IndexOrError(index)
if err != nil {
return ValOrErr(index, err.Error())
}
@@ -250,6 +255,11 @@ func (l *baseList) Get(index ref.Val) ref.Val {
return l.NativeToValue(l.get(ind))
}
+// IsZeroValue returns true if the list is empty.
+func (l *baseList) IsZeroValue() bool {
+ return l.size == 0
+}
+
// Iterator implements the traits.Iterable interface method.
func (l *baseList) Iterator() traits.Iterator {
return newListIterator(l)
@@ -266,10 +276,24 @@ func (l *baseList) Type() ref.Type {
}
// Value implements the ref.Val interface method.
-func (l *baseList) Value() interface{} {
+func (l *baseList) Value() any {
return l.value
}
+// String converts the list to a human-readable string form.
+func (l *baseList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := 0; i < l.size; i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.get(i)))
+ if i != l.size-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
// mutableList aggregates values into its internal storage. For use with internal CEL variables only.
type mutableList struct {
*baseList
@@ -305,7 +329,7 @@ func (l *mutableList) ToImmutableList() traits.Lister {
// The `ref.TypeAdapter` enables native type to CEL type conversions.
type concatList struct {
ref.TypeAdapter
- value interface{}
+ value any
prevList traits.Lister
nextList traits.Lister
}
@@ -351,8 +375,8 @@ func (l *concatList) Contains(elem ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (l *concatList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
- combined := NewDynamicList(l.TypeAdapter, l.Value().([]interface{}))
+func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ combined := NewDynamicList(l.TypeAdapter, l.Value().([]any))
return combined.ConvertToNative(typeDesc)
}
@@ -396,7 +420,7 @@ func (l *concatList) Equal(other ref.Val) ref.Val {
// Get implements the traits.Indexer interface method.
func (l *concatList) Get(index ref.Val) ref.Val {
- ind, err := indexOrError(index)
+ ind, err := IndexOrError(index)
if err != nil {
return ValOrErr(index, err.Error())
}
@@ -408,6 +432,11 @@ func (l *concatList) Get(index ref.Val) ref.Val {
return l.nextList.Get(offset)
}
+// IsZeroValue returns true if the list is empty.
+func (l *concatList) IsZeroValue() bool {
+ return l.Size().(Int) == 0
+}
+
// Iterator implements the traits.Iterable interface method.
func (l *concatList) Iterator() traits.Iterator {
return newListIterator(l)
@@ -418,15 +447,29 @@ func (l *concatList) Size() ref.Val {
return l.prevList.Size().(Int).Add(l.nextList.Size())
}
+// String converts the concatenated list to a human-readable string.
+func (l *concatList) String() string {
+ var sb strings.Builder
+ sb.WriteString("[")
+ for i := Int(0); i < l.Size().(Int); i++ {
+ sb.WriteString(fmt.Sprintf("%v", l.Get(i)))
+ if i != l.Size().(Int)-1 {
+ sb.WriteString(", ")
+ }
+ }
+ sb.WriteString("]")
+ return sb.String()
+}
+
// Type implements the ref.Val interface method.
func (l *concatList) Type() ref.Type {
return ListType
}
// Value implements the ref.Val interface method.
-func (l *concatList) Value() interface{} {
+func (l *concatList) Value() any {
if l.value == nil {
- merged := make([]interface{}, l.Size().(Int))
+ merged := make([]any, l.Size().(Int))
prevLen := l.prevList.Size().(Int)
for i := Int(0); i < prevLen; i++ {
merged[i] = l.prevList.Get(i).Value()
@@ -469,7 +512,8 @@ func (it *listIterator) Next() ref.Val {
return nil
}
-func indexOrError(index ref.Val) (int, error) {
+// IndexOrError converts an input index value into either a lossless integer index or an error.
+func IndexOrError(index ref.Val) (int, error) {
switch iv := index.(type) {
case Int:
return int(iv), nil
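
A usage sketch, not from the patch, of two of the list changes above: the new String method makes lists printable with %v, and IndexOrError is now exported; types.DefaultTypeAdapter is assumed as the adapter.

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
    )

    func main() {
        l := types.NewStringList(types.DefaultTypeAdapter, []string{"a", "b", "c"})
        fmt.Println(l) // [a, b, c], rendered by the new baseList.String method

        idx, err := types.IndexOrError(types.Int(1))
        fmt.Println(idx, err) // 1 <nil>
    }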
diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go
index 5865594024..213be4ac9e 100644
--- a/vendor/github.com/google/cel-go/common/types/map.go
+++ b/vendor/github.com/google/cel-go/common/types/map.go
@@ -17,20 +17,22 @@ package types
import (
"fmt"
"reflect"
+ "strings"
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
- "github.com/google/cel-go/common/types/traits"
"github.com/stoewer/go-strcase"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
)
// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs.
-func NewDynamicMap(adapter ref.TypeAdapter, value interface{}) traits.Mapper {
+func NewDynamicMap(adapter ref.TypeAdapter, value any) traits.Mapper {
refValue := reflect.ValueOf(value)
return &baseMap{
TypeAdapter: adapter,
@@ -65,7 +67,7 @@ func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Map
}
// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
-func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]interface{}) traits.Mapper {
+func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]any) traits.Mapper {
return &baseMap{
TypeAdapter: adapter,
mapAccessor: newStringIfaceMapAccessor(adapter, value),
@@ -125,7 +127,7 @@ type baseMap struct {
mapAccessor
	// value is the native Go value upon which the map type operates.
- value interface{}
+ value any
// size is the number of entries in the map.
size int
@@ -138,7 +140,7 @@ func (m *baseMap) Contains(index ref.Val) ref.Val {
}
// ConvertToNative implements the ref.Val interface method.
-func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the map is already assignable to the desired type return it, e.g. interfaces and
// maps with the same key value types.
if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
@@ -275,18 +277,42 @@ func (m *baseMap) Get(key ref.Val) ref.Val {
return v
}
+// IsZeroValue returns true if the map is empty.
+func (m *baseMap) IsZeroValue() bool {
+ return m.size == 0
+}
+
// Size implements the traits.Sizer interface method.
func (m *baseMap) Size() ref.Val {
return Int(m.size)
}
+// String converts the map into a human-readable string.
+func (m *baseMap) String() string {
+ var sb strings.Builder
+ sb.WriteString("{")
+ it := m.Iterator()
+ i := 0
+ for it.HasNext() == True {
+ k := it.Next()
+ v, _ := m.Find(k)
+ sb.WriteString(fmt.Sprintf("%v: %v", k, v))
+ if i != m.size-1 {
+ sb.WriteString(", ")
+ }
+ i++
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
// Type implements the ref.Val interface method.
func (m *baseMap) Type() ref.Type {
return MapType
}
// Value implements the ref.Val interface method.
-func (m *baseMap) Value() interface{} {
+func (m *baseMap) Value() any {
return m.value
}
@@ -498,7 +524,7 @@ func (a *stringMapAccessor) Iterator() traits.Iterator {
}
}
-func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]interface{}) mapAccessor {
+func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]any) mapAccessor {
return &stringIfaceMapAccessor{
TypeAdapter: adapter,
mapVal: mapVal,
@@ -507,7 +533,7 @@ func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]interf
type stringIfaceMapAccessor struct {
ref.TypeAdapter
- mapVal map[string]interface{}
+ mapVal map[string]any
}
// Find uses native map accesses to find the key, returning (value, true) if present.
@@ -556,7 +582,7 @@ func (m *protoMap) Contains(key ref.Val) ref.Val {
// ConvertToNative implements the ref.Val interface method.
//
// Note, assignment to Golang struct types is not yet supported.
-func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the map is already assignable to the desired type return it, e.g. interfaces and
// maps with the same key value types.
switch typeDesc {
@@ -601,9 +627,9 @@ func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
ntvKey := key.Interface()
ntvVal := val.Interface()
- switch ntvVal.(type) {
+ switch pv := ntvVal.(type) {
case protoreflect.Message:
- ntvVal = ntvVal.(protoreflect.Message).Interface()
+ ntvVal = pv.Interface()
}
if keyType == otherKeyType && valType == otherValType {
mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
@@ -732,6 +758,11 @@ func (m *protoMap) Get(key ref.Val) ref.Val {
return v
}
+// IsZeroValue returns true if the map is empty.
+func (m *protoMap) IsZeroValue() bool {
+ return m.value.Len() == 0
+}
+
// Iterator implements the traits.Iterable interface method.
func (m *protoMap) Iterator() traits.Iterator {
// Copy the keys to make their order stable.
@@ -758,7 +789,7 @@ func (m *protoMap) Type() ref.Type {
}
// Value implements the ref.Val interface method.
-func (m *protoMap) Value() interface{} {
+func (m *protoMap) Value() any {
return m.value
}
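
The analogous sketch for maps, assuming NewStringStringMap is the existing constructor referenced later in provider.go; the new baseMap.String method renders entries in {key: value} form.

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
    )

    func main() {
        m := types.NewStringStringMap(types.DefaultTypeAdapter, map[string]string{"zone": "eastus"})
        fmt.Println(m) // {zone: eastus}
    }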
diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go
index 3d3503c275..38927a112c 100644
--- a/vendor/github.com/google/cel-go/common/types/null.go
+++ b/vendor/github.com/google/cel-go/common/types/null.go
@@ -18,9 +18,10 @@ import (
"fmt"
"reflect"
- "github.com/google/cel-go/common/types/ref"
"google.golang.org/protobuf/proto"
+ "github.com/google/cel-go/common/types/ref"
+
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
)
@@ -34,14 +35,20 @@ var (
// NullValue singleton.
NullValue = Null(structpb.NullValue_NULL_VALUE)
- jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
+ // golang reflect type for Null values.
+ nullReflectType = reflect.TypeOf(NullValue)
)
// ConvertToNative implements ref.Val.ConvertToNative.
-func (n Null) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Int32:
- return reflect.ValueOf(n).Convert(typeDesc).Interface(), nil
+ switch typeDesc {
+ case jsonNullType:
+ return structpb.NullValue_NULL_VALUE, nil
+ case nullReflectType:
+ return n, nil
+ }
case reflect.Ptr:
switch typeDesc {
case anyValueType:
@@ -54,6 +61,10 @@ func (n Null) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
return anypb.New(pb.(proto.Message))
case jsonValueType:
return structpb.NewNullValue(), nil
+ case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType,
+ int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType,
+ uint64WrapperType:
+ return nil, nil
}
case reflect.Interface:
nv := n.Value()
@@ -86,12 +97,17 @@ func (n Null) Equal(other ref.Val) ref.Val {
return Bool(NullType == other.Type())
}
+// IsZeroValue returns true as null always represents an absent value.
+func (n Null) IsZeroValue() bool {
+ return true
+}
+
// Type implements ref.Val.Type.
func (n Null) Type() ref.Type {
return NullType
}
// Value implements ref.Val.Value.
-func (n Null) Value() interface{} {
+func (n Null) Value() any {
return structpb.NullValue_NULL_VALUE
}
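
A hedged sketch of the Null conversions changed above: converting NullValue to a protobuf wrapper pointer type now yields a nil (unset) wrapper, and the NullValue enum type is matched explicitly; the wrapper reflect types are assumed to correspond to google.golang.org/protobuf/types/known/wrapperspb.

    package main

    import (
        "fmt"
        "reflect"

        "github.com/google/cel-go/common/types"
        structpb "google.golang.org/protobuf/types/known/structpb"
        wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        // Wrapper types map to an unset (nil) value per the new case above.
        v, err := types.NullValue.ConvertToNative(reflect.TypeOf(&wrapperspb.Int64Value{}))
        fmt.Println(v, err) // <nil> <nil>

        // The JSON null enum type round-trips.
        n, err := types.NullValue.ConvertToNative(reflect.TypeOf(structpb.NullValue_NULL_VALUE))
        fmt.Println(n, err) // NULL_VALUE <nil>
    }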
diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go
index 5faf855110..9955e2dce5 100644
--- a/vendor/github.com/google/cel-go/common/types/object.go
+++ b/vendor/github.com/google/cel-go/common/types/object.go
@@ -18,11 +18,12 @@ import (
"fmt"
"reflect"
- "github.com/google/cel-go/common/types/pb"
- "github.com/google/cel-go/common/types/ref"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/proto"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+
anypb "google.golang.org/protobuf/types/known/anypb"
structpb "google.golang.org/protobuf/types/known/structpb"
)
@@ -52,7 +53,7 @@ func NewObject(adapter ref.TypeAdapter,
typeValue: typeValue}
}
-func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
srcPB := o.value
if reflect.TypeOf(srcPB).AssignableTo(typeDesc) {
return srcPB, nil
@@ -133,6 +134,11 @@ func (o *protoObj) IsSet(field ref.Val) ref.Val {
return False
}
+// IsZeroValue returns true if the protobuf object is empty.
+func (o *protoObj) IsZeroValue() bool {
+ return proto.Equal(o.value, o.typeDesc.Zero())
+}
+
func (o *protoObj) Get(index ref.Val) ref.Val {
protoFieldName, ok := index.(String)
if !ok {
@@ -154,6 +160,6 @@ func (o *protoObj) Type() ref.Type {
return o.typeValue
}
-func (o *protoObj) Value() interface{} {
+func (o *protoObj) Value() any {
return o.value
}
diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go
new file mode 100644
index 0000000000..54cb35b1ab
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/optional.go
@@ -0,0 +1,108 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+var (
+ // OptionalType indicates the runtime type of an optional value.
+ OptionalType = NewTypeValue("optional")
+
+ // OptionalNone is a sentinel value which is used to indicate an empty optional value.
+ OptionalNone = &Optional{}
+)
+
+// OptionalOf returns an optional value which wraps a concrete CEL value.
+func OptionalOf(value ref.Val) *Optional {
+ return &Optional{value: value}
+}
+
+// Optional value which points to a value if non-empty.
+type Optional struct {
+ value ref.Val
+}
+
+// HasValue returns true if the optional has a value.
+func (o *Optional) HasValue() bool {
+ return o.value != nil
+}
+
+// GetValue returns the wrapped value contained in the optional.
+func (o *Optional) GetValue() ref.Val {
+ if !o.HasValue() {
+ return NewErr("optional.none() dereference")
+ }
+ return o.value
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (o *Optional) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ if !o.HasValue() {
+ return nil, errors.New("optional.none() dereference")
+ }
+ return o.value.ConvertToNative(typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (o *Optional) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case OptionalType:
+ return o
+ case TypeType:
+ return OptionalType
+ }
+ return NewErr("type conversion error from '%s' to '%s'", OptionalType, typeVal)
+}
+
+// Equal determines whether the values contained by two optional values are equal.
+func (o *Optional) Equal(other ref.Val) ref.Val {
+ otherOpt, isOpt := other.(*Optional)
+ if !isOpt {
+ return False
+ }
+ if !o.HasValue() {
+ return Bool(!otherOpt.HasValue())
+ }
+ if !otherOpt.HasValue() {
+ return False
+ }
+ return o.value.Equal(otherOpt.value)
+}
+
+func (o *Optional) String() string {
+ if o.HasValue() {
+ return fmt.Sprintf("optional(%v)", o.GetValue())
+ }
+ return "optional.none()"
+}
+
+// Type implements the ref.Val interface method.
+func (o *Optional) Type() ref.Type {
+ return OptionalType
+}
+
+// Value returns the underlying 'Value()' of the wrapped value, if present.
+func (o *Optional) Value() any {
+ if o.value == nil {
+ return nil
+ }
+ return o.value.Value()
+}
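
A short usage sketch, not part of the patch, for the Optional value type added above, again assuming the canonical upstream import path.

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types"
    )

    func main() {
        some := types.OptionalOf(types.String("disk-1"))
        none := types.OptionalNone

        fmt.Println(some.HasValue(), none.HasValue()) // true false
        fmt.Println(some)                             // optional(disk-1)
        fmt.Println(none)                             // optional.none()
        fmt.Println(some.Equal(none))                 // false
    }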
diff --git a/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
index f23ac9c0e2..e2b9d37b56 100644
--- a/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
@@ -17,7 +17,7 @@ go_library(
],
importpath = "github.com/google/cel-go/common/types/pb",
deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//encoding/protowire:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
diff --git a/vendor/github.com/google/cel-go/common/types/pb/enum.go b/vendor/github.com/google/cel-go/common/types/pb/enum.go
index 4a26b5c7c3..09a1546308 100644
--- a/vendor/github.com/google/cel-go/common/types/pb/enum.go
+++ b/vendor/github.com/google/cel-go/common/types/pb/enum.go
@@ -18,9 +18,9 @@ import (
"google.golang.org/protobuf/reflect/protoreflect"
)
-// NewEnumValueDescription produces an enum value description with the fully qualified enum value
+// newEnumValueDescription produces an enum value description with the fully qualified enum value
// name and the enum value descriptor.
-func NewEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
+func newEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription {
return &EnumValueDescription{
enumValueName: name,
desc: desc,
diff --git a/vendor/github.com/google/cel-go/common/types/pb/file.go b/vendor/github.com/google/cel-go/common/types/pb/file.go
index 0bcade75f9..e323afb1df 100644
--- a/vendor/github.com/google/cel-go/common/types/pb/file.go
+++ b/vendor/github.com/google/cel-go/common/types/pb/file.go
@@ -18,32 +18,66 @@ import (
"fmt"
"google.golang.org/protobuf/reflect/protoreflect"
+
+ dynamicpb "google.golang.org/protobuf/types/dynamicpb"
)
-// NewFileDescription returns a FileDescription instance with a complete listing of all the message
-// types and enum values declared within any scope in the file.
-func NewFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) *FileDescription {
+// newFileDescription returns a FileDescription instance with a complete listing of all the message
+// types and enum values, as well as a map of extensions declared within any scope in the file.
+func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDescription, extensionMap) {
metadata := collectFileMetadata(fileDesc)
enums := make(map[string]*EnumValueDescription)
for name, enumVal := range metadata.enumValues {
- enums[name] = NewEnumValueDescription(name, enumVal)
+ enums[name] = newEnumValueDescription(name, enumVal)
}
types := make(map[string]*TypeDescription)
for name, msgType := range metadata.msgTypes {
- types[name] = NewTypeDescription(name, msgType)
+ types[name] = newTypeDescription(name, msgType, pbdb.extensions)
+ }
+ fileExtMap := make(extensionMap)
+ for typeName, extensions := range metadata.msgExtensionMap {
+ messageExtMap, found := fileExtMap[typeName]
+ if !found {
+ messageExtMap = make(map[string]*FieldDescription)
+ }
+ for _, ext := range extensions {
+ extDesc := dynamicpb.NewExtensionType(ext).TypeDescriptor()
+ messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc)
+ }
+ fileExtMap[typeName] = messageExtMap
}
return &FileDescription{
+ name: fileDesc.Path(),
types: types,
enums: enums,
- }
+ }, fileExtMap
}
// FileDescription holds a map of all types and enum values declared within a proto file.
type FileDescription struct {
+ name string
types map[string]*TypeDescription
enums map[string]*EnumValueDescription
}
+// Copy creates a copy of the FileDescription with updated Db references within its types.
+func (fd *FileDescription) Copy(pbdb *Db) *FileDescription {
+ typesCopy := make(map[string]*TypeDescription, len(fd.types))
+ for k, v := range fd.types {
+ typesCopy[k] = v.Copy(pbdb)
+ }
+ return &FileDescription{
+ name: fd.name,
+ types: typesCopy,
+ enums: fd.enums,
+ }
+}
+
+// GetName returns the fully qualified file path for the file.
+func (fd *FileDescription) GetName() string {
+ return fd.name
+}
+
// GetEnumDescription returns an EnumDescription for a qualified enum value
// name declared within the .proto file.
func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) {
@@ -94,6 +128,10 @@ type fileMetadata struct {
msgTypes map[string]protoreflect.MessageDescriptor
// enumValues maps from fully-qualified enum value to enum value descriptor.
enumValues map[string]protoreflect.EnumValueDescriptor
+ // msgExtensionMap maps from the protobuf message name being extended to a set of extensions
+ // for the type.
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor
+
// TODO: support enum type definitions for use in future type-check enhancements.
}
@@ -102,28 +140,38 @@ type fileMetadata struct {
func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata {
msgTypes := make(map[string]protoreflect.MessageDescriptor)
enumValues := make(map[string]protoreflect.EnumValueDescriptor)
- collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues)
+ msgExtensionMap := make(map[string][]protoreflect.ExtensionDescriptor)
+ collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues, msgExtensionMap)
collectEnumValues(fileDesc.Enums(), enumValues)
+ collectExtensions(fileDesc.Extensions(), msgExtensionMap)
return &fileMetadata{
- msgTypes: msgTypes,
- enumValues: enumValues,
+ msgTypes: msgTypes,
+ enumValues: enumValues,
+ msgExtensionMap: msgExtensionMap,
}
}
// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of
// fully qualified protobuf names to descriptors.
-func collectMsgTypes(msgTypes protoreflect.MessageDescriptors, msgTypeMap map[string]protoreflect.MessageDescriptor, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
+func collectMsgTypes(msgTypes protoreflect.MessageDescriptors,
+ msgTypeMap map[string]protoreflect.MessageDescriptor,
+ enumValueMap map[string]protoreflect.EnumValueDescriptor,
+ msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
for i := 0; i < msgTypes.Len(); i++ {
msgType := msgTypes.Get(i)
msgTypeMap[string(msgType.FullName())] = msgType
nestedMsgTypes := msgType.Messages()
if nestedMsgTypes.Len() != 0 {
- collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap)
+ collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap, msgExtensionMap)
}
nestedEnumTypes := msgType.Enums()
if nestedEnumTypes.Len() != 0 {
collectEnumValues(nestedEnumTypes, enumValueMap)
}
+ nestedExtensions := msgType.Extensions()
+ if nestedExtensions.Len() != 0 {
+ collectExtensions(nestedExtensions, msgExtensionMap)
+ }
}
}
@@ -139,3 +187,16 @@ func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[
}
}
}
+
+func collectExtensions(extensions protoreflect.ExtensionDescriptors, msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+ for i := 0; i < extensions.Len(); i++ {
+ ext := extensions.Get(i)
+ extendsMsg := string(ext.ContainingMessage().FullName())
+ msgExts, found := msgExtensionMap[extendsMsg]
+ if !found {
+ msgExts = []protoreflect.ExtensionDescriptor{}
+ }
+ msgExts = append(msgExts, ext)
+ msgExtensionMap[extendsMsg] = msgExts
+ }
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/pb.go b/vendor/github.com/google/cel-go/common/types/pb/pb.go
index 457b47ceee..eadebcb04e 100644
--- a/vendor/github.com/google/cel-go/common/types/pb/pb.go
+++ b/vendor/github.com/google/cel-go/common/types/pb/pb.go
@@ -40,13 +40,19 @@ type Db struct {
revFileDescriptorMap map[string]*FileDescription
// files contains the deduped set of FileDescriptions whose types are contained in the pb.Db.
files []*FileDescription
+ // extensions contains the mapping between a given type name, extension name and its FieldDescription
+ extensions map[string]map[string]*FieldDescription
}
+// extensionMap is a type alias for map[typeName]map[extensionName]*FieldDescription
+type extensionMap = map[string]map[string]*FieldDescription
+
var (
// DefaultDb used at evaluation time or unless overridden at check time.
DefaultDb = &Db{
revFileDescriptorMap: make(map[string]*FileDescription),
files: []*FileDescription{},
+ extensions: make(extensionMap),
}
)
@@ -80,6 +86,7 @@ func NewDb() *Db {
pbdb := &Db{
revFileDescriptorMap: make(map[string]*FileDescription),
files: []*FileDescription{},
+ extensions: make(extensionMap),
}
// The FileDescription objects in the default db contain lazily initialized TypeDescription
// values which may point to the state contained in the DefaultDb irrespective of this shallow
@@ -96,19 +103,34 @@ func NewDb() *Db {
// Copy creates a copy of the current database with its own internal descriptor mapping.
func (pbdb *Db) Copy() *Db {
copy := NewDb()
- for k, v := range pbdb.revFileDescriptorMap {
- copy.revFileDescriptorMap[k] = v
- }
- for _, f := range pbdb.files {
+ for _, fd := range pbdb.files {
hasFile := false
- for _, f2 := range copy.files {
- if f2 == f {
+ for _, fd2 := range copy.files {
+ if fd2 == fd {
hasFile = true
}
}
if !hasFile {
- copy.files = append(copy.files, f)
+ fd = fd.Copy(copy)
+ copy.files = append(copy.files, fd)
+ }
+ for _, enumValName := range fd.GetEnumNames() {
+ copy.revFileDescriptorMap[enumValName] = fd
+ }
+ for _, msgTypeName := range fd.GetTypeNames() {
+ copy.revFileDescriptorMap[msgTypeName] = fd
+ }
+ copy.revFileDescriptorMap[fd.GetName()] = fd
+ }
+ for typeName, extFieldMap := range pbdb.extensions {
+ copyExtFieldMap, found := copy.extensions[typeName]
+ if !found {
+ copyExtFieldMap = make(map[string]*FieldDescription, len(extFieldMap))
}
+ for extFieldName, fd := range extFieldMap {
+ copyExtFieldMap[extFieldName] = fd
+ }
+ copy.extensions[typeName] = copyExtFieldMap
}
return copy
}
@@ -137,17 +159,30 @@ func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileD
if err == nil {
fileDesc = globalFD
}
- fd = NewFileDescription(fileDesc, pbdb)
+ var fileExtMap extensionMap
+ fd, fileExtMap = newFileDescription(fileDesc, pbdb)
for _, enumValName := range fd.GetEnumNames() {
pbdb.revFileDescriptorMap[enumValName] = fd
}
for _, msgTypeName := range fd.GetTypeNames() {
pbdb.revFileDescriptorMap[msgTypeName] = fd
}
- pbdb.revFileDescriptorMap[fileDesc.Path()] = fd
+ pbdb.revFileDescriptorMap[fd.GetName()] = fd
// Return the specific file descriptor registered.
pbdb.files = append(pbdb.files, fd)
+
+ // Index the protobuf message extensions from the file into the pbdb
+ for typeName, extMap := range fileExtMap {
+ typeExtMap, found := pbdb.extensions[typeName]
+ if !found {
+ pbdb.extensions[typeName] = extMap
+ continue
+ }
+ for extName, field := range extMap {
+ typeExtMap[extName] = field
+ }
+ }
return fd, nil
}
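
A small sketch, not from the patch, of registering a descriptor with the extension-aware Db; durationpb.File_google_protobuf_duration_proto is assumed as a convenient protoreflect.FileDescriptor, and the printed name relies on the new FileDescription.GetName accessor.

    package main

    import (
        "fmt"

        "github.com/google/cel-go/common/types/pb"
        durationpb "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        pbdb := pb.NewDb()
        fd, err := pbdb.RegisterDescriptor(durationpb.File_google_protobuf_duration_proto)
        if err == nil {
            fmt.Println(fd.GetName()) // google/protobuf/duration.proto
        }
    }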
diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go
index 912076fa48..df9532156a 100644
--- a/vendor/github.com/google/cel-go/common/types/pb/type.go
+++ b/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -38,22 +38,23 @@ type description interface {
Zero() proto.Message
}
-// NewTypeDescription produces a TypeDescription value for the fully-qualified proto type name
+// newTypeDescription produces a TypeDescription value for the fully-qualified proto type name
// with a given descriptor.
-func NewTypeDescription(typeName string, desc protoreflect.MessageDescriptor) *TypeDescription {
+func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, extensions extensionMap) *TypeDescription {
msgType := dynamicpb.NewMessageType(desc)
msgZero := dynamicpb.NewMessage(desc)
fieldMap := map[string]*FieldDescription{}
fields := desc.Fields()
for i := 0; i < fields.Len(); i++ {
f := fields.Get(i)
- fieldMap[string(f.Name())] = NewFieldDescription(f)
+ fieldMap[string(f.Name())] = newFieldDescription(f)
}
return &TypeDescription{
typeName: typeName,
desc: desc,
msgType: msgType,
fieldMap: fieldMap,
+ extensions: extensions,
reflectType: reflectTypeOf(msgZero),
zeroMsg: zeroValueOf(msgZero),
}
@@ -66,10 +67,24 @@ type TypeDescription struct {
desc protoreflect.MessageDescriptor
msgType protoreflect.MessageType
fieldMap map[string]*FieldDescription
+ extensions extensionMap
reflectType reflect.Type
zeroMsg proto.Message
}
+// Copy copies the type description with updated references to the Db.
+func (td *TypeDescription) Copy(pbdb *Db) *TypeDescription {
+ return &TypeDescription{
+ typeName: td.typeName,
+ desc: td.desc,
+ msgType: td.msgType,
+ fieldMap: td.fieldMap,
+ extensions: pbdb.extensions,
+ reflectType: td.reflectType,
+ zeroMsg: td.zeroMsg,
+ }
+}
+
// FieldMap returns a string field name to FieldDescription map.
func (td *TypeDescription) FieldMap() map[string]*FieldDescription {
return td.fieldMap
@@ -78,16 +93,21 @@ func (td *TypeDescription) FieldMap() map[string]*FieldDescription {
// FieldByName returns (FieldDescription, true) if the field name is declared within the type.
func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) {
fd, found := td.fieldMap[name]
+ if found {
+ return fd, true
+ }
+ extFieldMap, found := td.extensions[td.typeName]
if !found {
return nil, false
}
- return fd, true
+ fd, found = extFieldMap[name]
+ return fd, found
}
// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible.
//
// This method returns the unwrapped value and 'true', else the original value and 'false'.
-func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (interface{}, bool, error) {
+func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (any, bool, error) {
return unwrap(td, msg)
}
@@ -111,8 +131,8 @@ func (td *TypeDescription) Zero() proto.Message {
return td.zeroMsg
}
-// NewFieldDescription creates a new field description from a protoreflect.FieldDescriptor.
-func NewFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription {
+// newFieldDescription creates a new field description from a protoreflect.FieldDescriptor.
+func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription {
var reflectType reflect.Type
var zeroMsg proto.Message
switch fieldDesc.Kind() {
@@ -124,9 +144,17 @@ func NewFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescripti
default:
reflectType = reflectTypeOf(fieldDesc.Default().Interface())
if fieldDesc.IsList() {
- parentMsg := dynamicpb.NewMessage(fieldDesc.ContainingMessage())
- listField := parentMsg.NewField(fieldDesc).List()
- elem := listField.NewElement().Interface()
+ var elemValue protoreflect.Value
+ if fieldDesc.IsExtension() {
+ et := dynamicpb.NewExtensionType(fieldDesc)
+ elemValue = et.New().List().NewElement()
+ } else {
+ parentMsgType := fieldDesc.ContainingMessage()
+ parentMsg := dynamicpb.NewMessage(parentMsgType)
+ listField := parentMsg.NewField(fieldDesc).List()
+ elemValue = listField.NewElement()
+ }
+ elem := elemValue.Interface()
switch elemType := elem.(type) {
case protoreflect.Message:
elem = elemType.Interface()
@@ -140,8 +168,8 @@ func NewFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescripti
}
var keyType, valType *FieldDescription
if fieldDesc.IsMap() {
- keyType = NewFieldDescription(fieldDesc.MapKey())
- valType = NewFieldDescription(fieldDesc.MapValue())
+ keyType = newFieldDescription(fieldDesc.MapKey())
+ valType = newFieldDescription(fieldDesc.MapValue())
}
return &FieldDescription{
desc: fieldDesc,
@@ -195,7 +223,7 @@ func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor {
//
// This function implements the FieldType.IsSet function contract which can be used to operate on
// more than just protobuf field accesses; however, the target here must be a protobuf.Message.
-func (fd *FieldDescription) IsSet(target interface{}) bool {
+func (fd *FieldDescription) IsSet(target any) bool {
switch v := target.(type) {
case proto.Message:
pbRef := v.ProtoReflect()
@@ -219,14 +247,14 @@ func (fd *FieldDescription) IsSet(target interface{}) bool {
//
// This function implements the FieldType.GetFrom function contract which can be used to operate
// on more than just protobuf field accesses; however, the target here must be a protobuf.Message.
-func (fd *FieldDescription) GetFrom(target interface{}) (interface{}, error) {
+func (fd *FieldDescription) GetFrom(target any) (any, error) {
v, ok := target.(proto.Message)
if !ok {
return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target)
}
pbRef := v.ProtoReflect()
pbDesc := pbRef.Descriptor()
- var fieldVal interface{}
+ var fieldVal any
if pbDesc == fd.desc.ContainingMessage() {
// When the target protobuf shares the same message descriptor instance as the field
// descriptor, use the cached field descriptor value.
@@ -289,7 +317,7 @@ func (fd *FieldDescription) IsList() bool {
//
// This function returns the unwrapped value and 'true' on success, or the original value
// and 'false' otherwise.
-func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (interface{}, bool, error) {
+func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (any, bool, error) {
return unwrapDynamic(fd, msg)
}
@@ -362,7 +390,7 @@ func checkedWrap(t *exprpb.Type) *exprpb.Type {
// input message is a *dynamicpb.Message which obscures the typing information from Go.
//
// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
-func unwrap(desc description, msg proto.Message) (interface{}, bool, error) {
+func unwrap(desc description, msg proto.Message) (any, bool, error) {
switch v := msg.(type) {
case *anypb.Any:
dynMsg, err := v.UnmarshalNew()
@@ -418,7 +446,7 @@ func unwrap(desc description, msg proto.Message) (interface{}, bool, error) {
// unwrapDynamic unwraps a reflected protobuf Message value.
//
// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
-func unwrapDynamic(desc description, refMsg protoreflect.Message) (interface{}, bool, error) {
+func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, error) {
msg := refMsg.Interface()
if !refMsg.IsValid() {
msg = desc.Zero()
@@ -508,7 +536,7 @@ func unwrapDynamic(desc description, refMsg protoreflect.Message) (interface{},
// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve
// well-known protobuf reflected types expected by the CEL type system.
-func reflectTypeOf(val interface{}) reflect.Type {
+func reflectTypeOf(val any) reflect.Type {
switch v := val.(type) {
case proto.Message:
return reflect.TypeOf(zeroValueOf(v))
diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go
index 02087d14e3..e66951f5b2 100644
--- a/vendor/github.com/google/cel-go/common/types/provider.go
+++ b/vendor/github.com/google/cel-go/common/types/provider.go
@@ -19,11 +19,12 @@ import (
"reflect"
"time"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
"github.com/google/cel-go/common/types/pb"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
anypb "google.golang.org/protobuf/types/known/anypb"
@@ -195,7 +196,7 @@ func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error {
// providing support for custom proto-based types.
//
// This method should be the inverse of ref.Val.ConvertToNative.
-func (p *protoTypeRegistry) NativeToValue(value interface{}) ref.Val {
+func (p *protoTypeRegistry) NativeToValue(value any) ref.Val {
if val, found := nativeToValue(p, value); found {
return val
}
@@ -249,7 +250,7 @@ var (
)
// NativeToValue implements the ref.TypeAdapter interface.
-func (a *defaultTypeAdapter) NativeToValue(value interface{}) ref.Val {
+func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val {
if val, found := nativeToValue(a, value); found {
return val
}
@@ -258,7 +259,7 @@ func (a *defaultTypeAdapter) NativeToValue(value interface{}) ref.Val {
// nativeToValue returns the converted (ref.Val, true) of a conversion is found,
// otherwise (nil, false)
-func nativeToValue(a ref.TypeAdapter, value interface{}) (ref.Val, bool) {
+func nativeToValue(a ref.TypeAdapter, value any) (ref.Val, bool) {
switch v := value.(type) {
case nil:
return NullValue, true
@@ -364,7 +365,7 @@ func nativeToValue(a ref.TypeAdapter, value interface{}) (ref.Val, bool) {
// specializations for common map types.
case map[string]string:
return NewStringStringMap(a, v), true
- case map[string]interface{}:
+ case map[string]any:
return NewStringInterfaceMap(a, v), true
case map[ref.Val]ref.Val:
return NewRefValMap(a, v), true
@@ -479,9 +480,12 @@ func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val re
if err != nil {
return fieldTypeConversionError(field, err)
}
- switch v.(type) {
+ if v == nil {
+ return nil
+ }
+ switch pv := v.(type) {
case proto.Message:
- v = v.(proto.Message).ProtoReflect()
+ v = pv.ProtoReflect()
}
target.Set(field.Descriptor(), protoreflect.ValueOf(v))
return nil
@@ -495,6 +499,9 @@ func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, l
if err != nil {
return fieldTypeConversionError(listField, err)
}
+ if elemVal == nil {
+ continue
+ }
switch ev := elemVal.(type) {
case proto.Message:
elemVal = ev.ProtoReflect()
@@ -519,9 +526,12 @@ func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapV
if err != nil {
return fieldTypeConversionError(mapField, err)
}
- switch v.(type) {
+ if v == nil {
+ continue
+ }
+ switch pv := v.(type) {
case proto.Message:
- v = v.(proto.Message).ProtoReflect()
+ v = pv.ProtoReflect()
}
target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v))
}
diff --git a/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
index 1d0f468993..79330c3321 100644
--- a/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
@@ -13,7 +13,7 @@ go_library(
],
importpath = "github.com/google/cel-go/common/types/ref",
deps = [
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
],
diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go
index 91a711fa70..7eabbb9ca3 100644
--- a/vendor/github.com/google/cel-go/common/types/ref/provider.go
+++ b/vendor/github.com/google/cel-go/common/types/ref/provider.go
@@ -39,8 +39,6 @@ type TypeProvider interface {
// FieldFieldType returns the field type for a checked type value. Returns
// false if the field could not be found.
- //
- // Used during type-checking only.
FindFieldType(messageType string, fieldName string) (*FieldType, bool)
// NewValue creates a new type value from a qualified name and map of field
@@ -55,7 +53,7 @@ type TypeProvider interface {
// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
type TypeAdapter interface {
// NativeToValue converts the input `value` to a CEL `ref.Val`.
- NativeToValue(value interface{}) Val
+ NativeToValue(value any) Val
}
// TypeRegistry allows third-parties to add custom types to CEL. Not all `TypeProvider`
@@ -97,7 +95,7 @@ type FieldType struct {
}
// FieldTester is used to test field presence on an input object.
-type FieldTester func(target interface{}) bool
+type FieldTester func(target any) bool
// FieldGetter is used to get the field value from an input object, if set.
-type FieldGetter func(target interface{}) (interface{}, error)
+type FieldGetter func(target any) (any, error)
diff --git a/vendor/github.com/google/cel-go/common/types/ref/reference.go b/vendor/github.com/google/cel-go/common/types/ref/reference.go
index 3098580c91..e0d58145cd 100644
--- a/vendor/github.com/google/cel-go/common/types/ref/reference.go
+++ b/vendor/github.com/google/cel-go/common/types/ref/reference.go
@@ -37,9 +37,18 @@ type Type interface {
type Val interface {
// ConvertToNative converts the Value to a native Go struct according to the
// reflected type description, or error if the conversion is not feasible.
- ConvertToNative(typeDesc reflect.Type) (interface{}, error)
+ //
+ // The ConvertToNative method is intended to be used to support conversion between CEL types
+ // and native types during object creation expressions or by clients who need to adapt the,
+ // returned CEL value into an equivalent Go value instance.
+ //
+ // When implementing or using ConvertToNative, the following guidelines apply:
+ // - Use ConvertToNative when marshalling CEL evaluation results to native types.
+ // - Do not use ConvertToNative within CEL extension functions.
+ // - Document whether your implementation supports non-CEL field types, such as Go or Protobuf.
+ ConvertToNative(typeDesc reflect.Type) (any, error)
- // ConvertToType supports type conversions between value types supported by the expression language.
+ // ConvertToType supports type conversions between CEL value types supported by the expression language.
ConvertToType(typeValue Type) Val
// Equal returns true if the `other` value has the same type and content as the implementing struct.
@@ -50,5 +59,5 @@ type Val interface {
// Value returns the raw value of the instance which may not be directly compatible with the expression
// language types.
- Value() interface{}
+ Value() any
}
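
A minimal sketch of the marshalling guidance above, assuming only the public cel-go API (cel.NewEnv, Compile, Program, Eval); the variable `x` and the expression are illustrative:

    package main

    import (
    	"fmt"
    	"reflect"

    	"github.com/google/cel-go/cel"
    )

    func main() {
    	// Declare one int variable and compile a list-producing expression.
    	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`[x, x + 1, x + 2]`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(map[string]any{"x": 10})
    	if err != nil {
    		panic(err)
    	}
    	// Marshal the CEL list result into a native []int64 slice.
    	native, err := out.ConvertToNative(reflect.TypeOf([]int64{}))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(native.([]int64)) // [10 11 12]
    }
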
diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go
index b6d665683c..a65cc14e4c 100644
--- a/vendor/github.com/google/cel-go/common/types/string.go
+++ b/vendor/github.com/google/cel-go/common/types/string.go
@@ -72,7 +72,7 @@ func (s String) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (s String) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.String:
if reflect.TypeOf(s).AssignableTo(typeDesc) {
@@ -154,6 +154,11 @@ func (s String) Equal(other ref.Val) ref.Val {
return Bool(ok && s == otherString)
}
+// IsZeroValue returns true if the string is empty.
+func (s String) IsZeroValue() bool {
+ return len(s) == 0
+}
+
// Match implements traits.Matcher.Match.
func (s String) Match(pattern ref.Val) ref.Val {
pat, ok := pattern.(String)
@@ -189,7 +194,7 @@ func (s String) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (s String) Value() interface{} {
+func (s String) Value() any {
return string(s)
}
diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go
index 7513a1b210..c784f2e54b 100644
--- a/vendor/github.com/google/cel-go/common/types/timestamp.go
+++ b/vendor/github.com/google/cel-go/common/types/timestamp.go
@@ -89,7 +89,7 @@ func (t Timestamp) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (any, error) {
// If the timestamp is already assignable to the desired type return it.
if reflect.TypeOf(t.Time).AssignableTo(typeDesc) {
return t.Time, nil
@@ -138,6 +138,11 @@ func (t Timestamp) Equal(other ref.Val) ref.Val {
return Bool(ok && t.Time.Equal(otherTime.Time))
}
+// IsZeroValue returns true if the timestamp is epoch 0.
+func (t Timestamp) IsZeroValue() bool {
+ return t.IsZero()
+}
+
// Receive implements traits.Receiver.Receive.
func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val {
switch len(args) {
@@ -160,14 +165,14 @@ func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val {
dur := subtrahend.(Duration)
val, err := subtractTimeDurationChecked(t.Time, dur.Duration)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return timestampOf(val)
case TimestampType:
t2 := subtrahend.(Timestamp).Time
val, err := subtractTimeChecked(t.Time, t2)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return durationOf(val)
}
@@ -180,7 +185,7 @@ func (t Timestamp) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (t Timestamp) Value() interface{} {
+func (t Timestamp) Value() any {
return t.Time
}
@@ -288,7 +293,7 @@ func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
if ind == -1 {
loc, err := time.LoadLocation(val)
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return visitor(t.In(loc))
}
@@ -297,11 +302,11 @@ func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor {
// in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes.
hr, err := strconv.Atoi(string(val[0:ind]))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
min, err := strconv.Atoi(string(val[ind+1:]))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
var offset int
if string(val[0]) == "-" {
diff --git a/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
index 86e54af61a..b19eb8301e 100644
--- a/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
@@ -20,6 +20,7 @@ go_library(
"receiver.go",
"sizer.go",
"traits.go",
+ "zeroer.go",
],
importpath = "github.com/google/cel-go/common/types/traits",
deps = [
diff --git a/vendor/github.com/google/cel-go/interpreter/coster.go b/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
similarity index 50%
rename from vendor/github.com/google/cel-go/interpreter/coster.go
rename to vendor/github.com/google/cel-go/common/types/traits/zeroer.go
index ac573d5745..0b7c830a24 100644
--- a/vendor/github.com/google/cel-go/interpreter/coster.go
+++ b/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,24 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package interpreter
+package traits
-import "math"
-
-// TODO: remove Coster.
-
-// Coster calculates the heuristic cost incurred during evaluation.
-// Deprecated: Please migrate cel.EstimateCost, it supports length estimates for input data and cost estimates for
-// extension functions.
-type Coster interface {
- Cost() (min, max int64)
-}
-
-// estimateCost returns the heuristic cost interval for the program.
-func estimateCost(i interface{}) (min, max int64) {
- c, ok := i.(Coster)
- if !ok {
- return 0, math.MaxInt64
- }
- return c.Cost()
+// Zeroer interface for testing whether a CEL value is a zero value for its type.
+type Zeroer interface {
+ // IsZeroValue indicates whether the object is the zero value for the type.
+ IsZeroValue() bool
}
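
A minimal sketch of how the new trait can be consumed, assuming the IsZeroValue implementations added on types.String and types.Uint in this change; the isZero helper is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/types"
    	"github.com/google/cel-go/common/types/ref"
    	"github.com/google/cel-go/common/types/traits"
    )

    // isZero reports whether a CEL value implements traits.Zeroer and is the
    // zero value for its type; values without the trait report false.
    func isZero(v ref.Val) bool {
    	z, ok := v.(traits.Zeroer)
    	return ok && z.IsZeroValue()
    }

    func main() {
    	fmt.Println(isZero(types.String("")))      // true
    	fmt.Println(isZero(types.String("hello"))) // false
    	fmt.Println(isZero(types.Uint(0)))         // true
    }
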
diff --git a/vendor/github.com/google/cel-go/common/types/type.go b/vendor/github.com/google/cel-go/common/types/type.go
index 21160974bb..164a460503 100644
--- a/vendor/github.com/google/cel-go/common/types/type.go
+++ b/vendor/github.com/google/cel-go/common/types/type.go
@@ -53,7 +53,7 @@ func NewObjectTypeValue(name string) *TypeValue {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (any, error) {
// TODO: replace the internal type representation with a proto-value.
return nil, fmt.Errorf("type conversion not supported for 'type'")
}
@@ -97,6 +97,6 @@ func (t *TypeValue) TypeName() string {
}
// Value implements ref.Val.Value.
-func (t *TypeValue) Value() interface{} {
+func (t *TypeValue) Value() any {
return t.name
}
diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go
index ca266e0457..615c7ec523 100644
--- a/vendor/github.com/google/cel-go/common/types/uint.go
+++ b/vendor/github.com/google/cel-go/common/types/uint.go
@@ -59,7 +59,7 @@ func (i Uint) Add(other ref.Val) ref.Val {
}
val, err := addUint64Checked(uint64(i), uint64(otherUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(val)
}
@@ -82,7 +82,7 @@ func (i Uint) Compare(other ref.Val) ref.Val {
}
// ConvertToNative implements ref.Val.ConvertToNative.
-func (i Uint) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) {
switch typeDesc.Kind() {
case reflect.Uint, reflect.Uint32:
v, err := uint64ToUint32Checked(uint64(i))
@@ -149,7 +149,7 @@ func (i Uint) ConvertToType(typeVal ref.Type) ref.Val {
case IntType:
v, err := uint64ToInt64Checked(uint64(i))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Int(v)
case UintType:
@@ -172,7 +172,7 @@ func (i Uint) Divide(other ref.Val) ref.Val {
}
div, err := divideUint64Checked(uint64(i), uint64(otherUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(div)
}
@@ -194,6 +194,11 @@ func (i Uint) Equal(other ref.Val) ref.Val {
}
}
+// IsZeroValue returns true if the uint is zero.
+func (i Uint) IsZeroValue() bool {
+ return i == 0
+}
+
// Modulo implements traits.Modder.Modulo.
func (i Uint) Modulo(other ref.Val) ref.Val {
otherUint, ok := other.(Uint)
@@ -202,7 +207,7 @@ func (i Uint) Modulo(other ref.Val) ref.Val {
}
mod, err := moduloUint64Checked(uint64(i), uint64(otherUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(mod)
}
@@ -215,7 +220,7 @@ func (i Uint) Multiply(other ref.Val) ref.Val {
}
val, err := multiplyUint64Checked(uint64(i), uint64(otherUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(val)
}
@@ -228,7 +233,7 @@ func (i Uint) Subtract(subtrahend ref.Val) ref.Val {
}
val, err := subtractUint64Checked(uint64(i), uint64(subtraUint))
if err != nil {
- return wrapErr(err)
+ return WrapErr(err)
}
return Uint(val)
}
@@ -239,7 +244,7 @@ func (i Uint) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (i Uint) Value() interface{} {
+func (i Uint) Value() any {
return uint64(i)
}
diff --git a/vendor/github.com/google/cel-go/common/types/unknown.go b/vendor/github.com/google/cel-go/common/types/unknown.go
index 95b47426fd..bc411c15b9 100644
--- a/vendor/github.com/google/cel-go/common/types/unknown.go
+++ b/vendor/github.com/google/cel-go/common/types/unknown.go
@@ -30,7 +30,7 @@ var (
)
// ConvertToNative implements ref.Val.ConvertToNative.
-func (u Unknown) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+func (u Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) {
return u.Value(), nil
}
@@ -50,7 +50,7 @@ func (u Unknown) Type() ref.Type {
}
// Value implements ref.Val.Value.
-func (u Unknown) Value() interface{} {
+func (u Unknown) Value() any {
return []int64(u)
}
diff --git a/vendor/github.com/google/cel-go/ext/BUILD.bazel b/vendor/github.com/google/cel-go/ext/BUILD.bazel
index 9c2520b408..4bcf8a283e 100644
--- a/vendor/github.com/google/cel-go/ext/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/ext/BUILD.bazel
@@ -9,14 +9,30 @@ go_library(
srcs = [
"encoders.go",
"guards.go",
+ "math.go",
+ "native.go",
+ "protos.go",
+ "sets.go",
"strings.go",
],
importpath = "github.com/google/cel-go/ext",
visibility = ["//visibility:public"],
deps = [
"//cel:go_default_library",
+ "//checker/decls:go_default_library",
+ "//common:go_default_library",
+ "//common/overloads:go_default_library",
"//common/types:go_default_library",
+ "//common/types/pb:go_default_library",
"//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//interpreter:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_x_text//language:go_default_library",
+ "@org_golang_x_text//message:go_default_library",
],
)
@@ -25,6 +41,10 @@ go_test(
size = "small",
srcs = [
"encoders_test.go",
+ "math_test.go",
+ "native_test.go",
+ "protos_test.go",
+ "sets_test.go",
"strings_test.go",
],
embed = [
@@ -32,5 +52,17 @@ go_test(
],
deps = [
"//cel:go_default_library",
+ "//checker:go_default_library",
+ "//common:go_default_library",
+ "//common/types:go_default_library",
+ "//common/types/ref:go_default_library",
+ "//common/types/traits:go_default_library",
+ "//test:go_default_library",
+ "//test/proto2pb:go_default_library",
+ "//test/proto3pb:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+ "@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+ "@org_golang_google_protobuf//encoding/protojson:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md
index 5ddcc41510..ef0eb2ab7f 100644
--- a/vendor/github.com/google/cel-go/ext/README.md
+++ b/vendor/github.com/google/cel-go/ext/README.md
@@ -3,6 +3,30 @@
CEL extensions are a related set of constants, functions, macros, or other
features which may not be covered by the core CEL spec.
+## Bindings
+
+Returns a cel.EnvOption to configure support for local variable bindings
+in expressions.
+
+# Cel.Bind
+
+Binds a simple identifier to an initialization expression which may be used
+in a subsequenct result expression. Bindings may also be nested within each
+other.
+
+    cel.bind(<varName>, <initExpr>, <resultExpr>)
+
+Examples:
+
+ cel.bind(a, 'hello',
+ cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
+
+ // Avoid a list allocation within the exists comprehension.
+ cel.bind(valid_values, [a, b, c],
+ [d, e, f].exists(elem, elem in valid_values))
+
+Local bindings are not guaranteed to be evaluated before use.
+
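
A minimal usage sketch, assuming only the ext.Bindings() option defined in this change and the standard cel-go compile/eval flow; the expression is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(ext.Bindings())
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`cel.bind(a, 'hello', a + ' world, ' + a)`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(cel.NoVars())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Value()) // hello world, hello
    }
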
## Encoders
Encoding utilities for marshalling data into standardized representations.
@@ -31,6 +55,156 @@ Example:
base64.encode(b'hello') // return 'aGVsbG8='
+## Math
+
+Math helper macros and functions.
+
+Note, all macros use the 'math' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'math', the macro will likely work just as
+intended; however, there is some chance for collision.
+
+### Math.Greatest
+
+Returns the greatest valued number present in the arguments to the macro.
+
+Greatest is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an
+error.
+
+    math.greatest(<arg>, ...) -> <double|int|uint>
+
+Examples:
+
+ math.greatest(1) // 1
+ math.greatest(1u, 2u) // 2u
+ math.greatest(-42.0, -21.5, -100.0) // -21.5
+ math.greatest([-42.0, -21.5, -100.0]) // -21.5
+ math.greatest(numbers) // numbers must be list(numeric)
+
+ math.greatest() // parse error
+ math.greatest('string') // parse error
+ math.greatest(a, b) // check-time error if a or b is non-numeric
+ math.greatest(dyn('string')) // runtime error
+
+### Math.Least
+
+Returns the least valued number present in the arguments to the macro.
+
+Least is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an error.
+
+    math.least(<arg>, ...) -> <double|int|uint>
+
+Examples:
+
+ math.least(1) // 1
+ math.least(1u, 2u) // 1u
+ math.least(-42.0, -21.5, -100.0) // -100.0
+ math.least([-42.0, -21.5, -100.0]) // -100.0
+ math.least(numbers) // numbers must be list(numeric)
+
+ math.least() // parse error
+ math.least('string') // parse error
+ math.least(a, b) // check-time error if a or b is non-numeric
+ math.least(dyn('string')) // runtime error
+
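
A minimal usage sketch for the math library, assuming only the ext.Math() option added by this change; the literals mirror the examples above:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(ext.Math())
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`math.greatest(-42.0, -21.5, -100.0)`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(cel.NoVars())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Value()) // -21.5
    }
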
+## Protos
+
+Protos configure extended macros and functions for proto manipulation.
+
+Note, all macros use the 'proto' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'proto', the macro will likely work just as
+you intend; however, there is some chance for collision.
+
+### Protos.GetExt
+
+Macro which generates a select expression that retrieves an extension field
+from the input proto2 syntax message. If the field is not set, the default
+value for the extension field is returned according to safe-traversal semantics.
+
+    proto.getExt(<msg>, <fully.qualified.extension.name>) -> <field-type>
+
+Example:
+
+ proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value
+
+### Protos.HasExt
+
+Macro which generates a test-only select expression that determines whether
+an extension field is set on a proto2 syntax message.
+
+    proto.hasExt(<msg>, <fully.qualified.extension.name>) -> <bool>
+
+Example:
+
+ proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
+
+## Sets
+
+Sets provides set relationship tests.
+
+There is no set type within CEL, and while one may be introduced in the
+future, there are cases where a `list` type is known to behave like a set.
+For such cases, this library provides some basic functionality for
+determining set containment, equivalence, and intersection.
+
+### Sets.Contains
+
+Returns whether the first list argument contains all elements in the second
+list argument. The list may contain elements of any type and standard CEL
+equality is used to determine whether a value exists in both lists. If the
+second list is empty, the result will always be true.
+
+ sets.contains(list(T), list(T)) -> bool
+
+Examples:
+
+ sets.contains([], []) // true
+ sets.contains([], [1]) // false
+ sets.contains([1, 2, 3, 4], [2, 3]) // true
+ sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true
+
+### Sets.Equivalent
+
+Returns whether the first and second list are set equivalent. Lists are set
+equivalent if for every item in the first list, there is an element in the
+second which is equal. The lists may not be of the same size as they do not
+guarantee the elements within them are unique, so size does not factor into
+the computation.
+
+ sets.equivalent(list(T), list(T)) -> bool
+
+Examples:
+
+ sets.equivalent([], []) // true
+ sets.equivalent([1], [1, 1]) // true
+ sets.equivalent([1], [1u, 1.0]) // true
+ sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true
+
+### Sets.Intersects
+
+Returns whether the first list has at least one element whose value is equal
+to an element in the second list. If either list is empty, the result will
+be false.
+
+ sets.intersects(list(T), list(T)) -> bool
+
+Examples:
+
+ sets.intersects([1], []) // false
+ sets.intersects([1], [1, 2]) // true
+ sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true
+
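
A minimal usage sketch, assuming only the ext.Sets() option added by this change; the lists mirror the examples above:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(ext.Sets())
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`sets.contains([1, 2, 3, 4], [2, 3])`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(cel.NoVars())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Value()) // true
    }
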
## Strings
Extended functions for string manipulation. As a general note, all indices are
@@ -70,6 +244,23 @@ Examples:
'hello mellow'.indexOf('ello', 2) // returns 7
'hello mellow'.indexOf('ello', 20) // error
+### Join
+
+Returns a new string where the elements of string list are concatenated.
+
+The function also accepts an optional separator which is placed between
+elements in the resulting string.
+
+    <list<string>>.join() -> <string>
+    <list<string>>.join(<separator>) -> <string>
+
+Examples:
+
+ ['hello', 'mellow'].join() // returns 'hellomellow'
+ ['hello', 'mellow'].join(' ') // returns 'hello mellow'
+ [].join() // returns ''
+ [].join('/') // returns ''
+
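
A minimal usage sketch for join, assuming only ext.Strings() and the standard compile/eval flow; the list mirrors the examples above:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(ext.Strings())
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`['hello', 'mellow'].join(' ')`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(cel.NoVars())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Value()) // hello mellow
    }
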
### LastIndexOf
Returns the integer index of the last occurrence of the search string. If the
@@ -105,6 +296,20 @@ Examples:
'TacoCat'.lowerAscii() // returns 'tacocat'
'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+### Quote
+
+**Introduced in version 1**
+
+Takes the given string and makes it safe to print (without any formatting due to escape sequences).
+If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD.
+
+    strings.quote(<string>)
+
+Examples:
+
+ strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""'
+ strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"'
+
### Replace
Returns a new string based on the target, which replaces the occurrences of a
diff --git a/vendor/github.com/google/cel-go/ext/bindings.go b/vendor/github.com/google/cel-go/ext/bindings.go
new file mode 100644
index 0000000000..9cc3c3efe5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/bindings.go
@@ -0,0 +1,100 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Bindings returns a cel.EnvOption to configure support for local variable
+// bindings in expressions.
+//
+// # Cel.Bind
+//
+// Binds a simple identifier to an initialization expression which may be used
+// in a subsequent result expression. Bindings may also be nested within each
+// other.
+//
+// cel.bind(<varName>, <initExpr>, <resultExpr>)
+//
+// Examples:
+//
+// cel.bind(a, 'hello',
+// cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
+//
+// // Avoid a list allocation within the exists comprehension.
+// cel.bind(valid_values, [a, b, c],
+// [d, e, f].exists(elem, elem in valid_values))
+//
+// Local bindings are not guaranteed to be evaluated before use.
+func Bindings() cel.EnvOption {
+ return cel.Lib(celBindings{})
+}
+
+const (
+ celNamespace = "cel"
+ bindMacro = "bind"
+ unusedIterVar = "#unused"
+)
+
+type celBindings struct{}
+
+func (celBindings) LibraryName() string {
+ return "cel.lib.ext.cel.bindings"
+}
+
+func (celBindings) CompileOptions() []cel.EnvOption {
+ return []cel.EnvOption{
+ cel.Macros(
+ // cel.bind(var, <init>, <expr>)
+ cel.NewReceiverMacro(bindMacro, 3, celBind),
+ ),
+ }
+}
+
+func (celBindings) ProgramOptions() []cel.ProgramOption {
+ return []cel.ProgramOption{}
+}
+
+func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(celNamespace, target) {
+ return nil, nil
+ }
+ varIdent := args[0]
+ varName := ""
+ switch varIdent.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ varName = varIdent.GetIdentExpr().GetName()
+ default:
+ return nil, &common.Error{
+ Message: "cel.bind() variable names must be simple identifers",
+ Location: meh.OffsetLocation(varIdent.GetId()),
+ }
+ }
+ varInit := args[1]
+ resultExpr := args[2]
+ return meh.Fold(
+ unusedIterVar,
+ meh.NewList(),
+ varName,
+ varInit,
+ meh.LiteralBool(false),
+ meh.Ident(varName),
+ resultExpr,
+ ), nil
+}
diff --git a/vendor/github.com/google/cel-go/ext/encoders.go b/vendor/github.com/google/cel-go/ext/encoders.go
index 22e38c39f9..d9f9cb5152 100644
--- a/vendor/github.com/google/cel-go/ext/encoders.go
+++ b/vendor/github.com/google/cel-go/ext/encoders.go
@@ -26,34 +26,38 @@ import (
// Encoders returns a cel.EnvOption to configure extended functions for string, byte, and object
// encodings.
//
-// Base64.Decode
+// # Base64.Decode
//
// Decodes base64-encoded string to bytes.
//
// This function will return an error if the string input is not base64-encoded.
//
-// base64.decode() ->
+// base64.decode(<string>) -> <bytes>
//
// Examples:
//
-// base64.decode('aGVsbG8=') // return b'hello'
-// base64.decode('aGVsbG8') // error
+// base64.decode('aGVsbG8=') // return b'hello'
+// base64.decode('aGVsbG8') // error
//
-// Base64.Encode
+// # Base64.Encode
//
// Encodes bytes to a base64-encoded string.
//
-// base64.encode() ->
+// base64.encode(<bytes>) -> <string>
//
// Examples:
//
-// base64.encode(b'hello') // return b'aGVsbG8='
+// base64.encode(b'hello') // return b'aGVsbG8='
func Encoders() cel.EnvOption {
return cel.Lib(encoderLib{})
}
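
A minimal usage sketch for the option above, assuming only ext.Encoders() and the standard compile/eval flow; the expression mirrors the doc examples:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(ext.Encoders())
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`base64.encode(b'hello')`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(cel.NoVars())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Value()) // aGVsbG8=
    }
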
type encoderLib struct{}
+func (encoderLib) LibraryName() string {
+ return "cel.lib.ext.encoders"
+}
+
func (encoderLib) CompileOptions() []cel.EnvOption {
return []cel.EnvOption{
cel.Function("base64.decode",
diff --git a/vendor/github.com/google/cel-go/ext/guards.go b/vendor/github.com/google/cel-go/ext/guards.go
index 0794f859b5..4c7786a690 100644
--- a/vendor/github.com/google/cel-go/ext/guards.go
+++ b/vendor/github.com/google/cel-go/ext/guards.go
@@ -17,6 +17,7 @@ package ext
import (
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// function invocation guards for common call signatures within extension functions.
@@ -48,3 +49,15 @@ func listStringOrError(strs []string, err error) ref.Val {
}
return types.DefaultTypeAdapter.NativeToValue(strs)
}
+
+func macroTargetMatchesNamespace(ns string, target *exprpb.Expr) bool {
+ switch target.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ if target.GetIdentExpr().GetName() != ns {
+ return false
+ }
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/google/cel-go/ext/math.go b/vendor/github.com/google/cel-go/ext/math.go
new file mode 100644
index 0000000000..1c8ad585a1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/math.go
@@ -0,0 +1,388 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Math returns a cel.EnvOption to configure namespaced math helper macros and
+// functions.
+//
+// Note, all macros use the 'math' namespace; however, at the time of macro
+// expansion the namespace looks just like any other identifier. If you are
+// currently using a variable named 'math', the macro will likely work just as
+// intended; however, there is some chance for collision.
+//
+// # Math.Greatest
+//
+// Returns the greatest valued number present in the arguments to the macro.
+//
+// Greatest is a variable argument count macro which must take at least one
+// argument. Simple numeric and list literals are supported as valid argument
+// types; however, other literals will be flagged as errors during macro
+// expansion. If the argument expression does not resolve to a numeric or
+// list(numeric) type during type-checking, or during runtime then an error
+// will be produced. If a list argument is empty, this too will produce an
+// error.
+//
+// math.greatest(<arg>, ...) -> <double|int|uint>
+//
+// Examples:
+//
+// math.greatest(1) // 1
+// math.greatest(1u, 2u) // 2u
+// math.greatest(-42.0, -21.5, -100.0) // -21.5
+// math.greatest([-42.0, -21.5, -100.0]) // -21.5
+// math.greatest(numbers) // numbers must be list(numeric)
+//
+// math.greatest() // parse error
+// math.greatest('string') // parse error
+// math.greatest(a, b) // check-time error if a or b is non-numeric
+// math.greatest(dyn('string')) // runtime error
+//
+// # Math.Least
+//
+// Returns the least valued number present in the arguments to the macro.
+//
+// Least is a variable argument count macro which must take at least one
+// argument. Simple numeric and list literals are supported as valid argument
+// types; however, other literals will be flagged as errors during macro
+// expansion. If the argument expression does not resolve to a numeric or
+// list(numeric) type during type-checking, or during runtime then an error
+// will be produced. If a list argument is empty, this too will produce an
+// error.
+//
+// math.least(<arg>, ...) -> <double|int|uint>
+//
+// Examples:
+//
+// math.least(1) // 1
+// math.least(1u, 2u) // 1u
+// math.least(-42.0, -21.5, -100.0) // -100.0
+// math.least([-42.0, -21.5, -100.0]) // -100.0
+// math.least(numbers) // numbers must be list(numeric)
+//
+// math.least() // parse error
+// math.least('string') // parse error
+// math.least(a, b) // check-time error if a or b is non-numeric
+// math.least(dyn('string')) // runtime error
+func Math() cel.EnvOption {
+ return cel.Lib(mathLib{})
+}
+
+const (
+ mathNamespace = "math"
+ leastMacro = "least"
+ greatestMacro = "greatest"
+ minFunc = "math.@min"
+ maxFunc = "math.@max"
+)
+
+type mathLib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (mathLib) LibraryName() string {
+ return "cel.lib.ext.math"
+}
+
+// CompileOptions implements the Library interface method.
+func (mathLib) CompileOptions() []cel.EnvOption {
+ return []cel.EnvOption{
+ cel.Macros(
+ // math.least(num, ...)
+ cel.NewReceiverVarArgMacro(leastMacro, mathLeast),
+ // math.greatest(num, ...)
+ cel.NewReceiverVarArgMacro(greatestMacro, mathGreatest),
+ ),
+ cel.Function(minFunc,
+ cel.Overload("math_@min_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@min_int", []*cel.Type{cel.IntType}, cel.IntType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@min_uint", []*cel.Type{cel.UintType}, cel.UintType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@min_double_double", []*cel.Type{cel.DoubleType, cel.DoubleType}, cel.DoubleType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_int_uint", []*cel.Type{cel.IntType, cel.UintType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_int_double", []*cel.Type{cel.IntType, cel.DoubleType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_double_int", []*cel.Type{cel.DoubleType, cel.IntType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_double_uint", []*cel.Type{cel.DoubleType, cel.UintType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_uint_double", []*cel.Type{cel.UintType, cel.DoubleType}, cel.DynType,
+ cel.BinaryBinding(minPair)),
+ cel.Overload("math_@min_list_double", []*cel.Type{cel.ListType(cel.DoubleType)}, cel.DoubleType,
+ cel.UnaryBinding(minList)),
+ cel.Overload("math_@min_list_int", []*cel.Type{cel.ListType(cel.IntType)}, cel.IntType,
+ cel.UnaryBinding(minList)),
+ cel.Overload("math_@min_list_uint", []*cel.Type{cel.ListType(cel.UintType)}, cel.UintType,
+ cel.UnaryBinding(minList)),
+ ),
+ cel.Function(maxFunc,
+ cel.Overload("math_@max_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@max_int", []*cel.Type{cel.IntType}, cel.IntType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@max_uint", []*cel.Type{cel.UintType}, cel.UintType,
+ cel.UnaryBinding(identity)),
+ cel.Overload("math_@max_double_double", []*cel.Type{cel.DoubleType, cel.DoubleType}, cel.DoubleType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_int_uint", []*cel.Type{cel.IntType, cel.UintType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_int_double", []*cel.Type{cel.IntType, cel.DoubleType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_double_int", []*cel.Type{cel.DoubleType, cel.IntType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_double_uint", []*cel.Type{cel.DoubleType, cel.UintType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_uint_double", []*cel.Type{cel.UintType, cel.DoubleType}, cel.DynType,
+ cel.BinaryBinding(maxPair)),
+ cel.Overload("math_@max_list_double", []*cel.Type{cel.ListType(cel.DoubleType)}, cel.DoubleType,
+ cel.UnaryBinding(maxList)),
+ cel.Overload("math_@max_list_int", []*cel.Type{cel.ListType(cel.IntType)}, cel.IntType,
+ cel.UnaryBinding(maxList)),
+ cel.Overload("math_@max_list_uint", []*cel.Type{cel.ListType(cel.UintType)}, cel.UintType,
+ cel.UnaryBinding(maxList)),
+ ),
+ }
+}
+
+// ProgramOptions implements the Library interface method.
+func (mathLib) ProgramOptions() []cel.ProgramOption {
+ return []cel.ProgramOption{}
+}
+
+func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(mathNamespace, target) {
+ return nil, nil
+ }
+ switch len(args) {
+ case 0:
+ return nil, &common.Error{
+ Message: "math.least() requires at least one argument",
+ Location: meh.OffsetLocation(target.GetId()),
+ }
+ case 1:
+ if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
+ return meh.GlobalCall(minFunc, args[0]), nil
+ }
+ return nil, &common.Error{
+ Message: "math.least() invalid single argument value",
+ Location: meh.OffsetLocation(args[0].GetId()),
+ }
+ case 2:
+ err := checkInvalidArgs(meh, "math.least()", args)
+ if err != nil {
+ return nil, err
+ }
+ return meh.GlobalCall(minFunc, args...), nil
+ default:
+ err := checkInvalidArgs(meh, "math.least()", args)
+ if err != nil {
+ return nil, err
+ }
+ return meh.GlobalCall(minFunc, meh.NewList(args...)), nil
+ }
+}
+
+func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(mathNamespace, target) {
+ return nil, nil
+ }
+ switch len(args) {
+ case 0:
+ return nil, &common.Error{
+ Message: "math.greatest() requires at least one argument",
+ Location: meh.OffsetLocation(target.GetId()),
+ }
+ case 1:
+ if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
+ return meh.GlobalCall(maxFunc, args[0]), nil
+ }
+ return nil, &common.Error{
+ Message: "math.greatest() invalid single argument value",
+ Location: meh.OffsetLocation(args[0].GetId()),
+ }
+ case 2:
+ err := checkInvalidArgs(meh, "math.greatest()", args)
+ if err != nil {
+ return nil, err
+ }
+ return meh.GlobalCall(maxFunc, args...), nil
+ default:
+ err := checkInvalidArgs(meh, "math.greatest()", args)
+ if err != nil {
+ return nil, err
+ }
+ return meh.GlobalCall(maxFunc, meh.NewList(args...)), nil
+ }
+}
+
+func identity(val ref.Val) ref.Val {
+ return val
+}
+
+func minPair(first, second ref.Val) ref.Val {
+ cmp, ok := first.(traits.Comparer)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(first)
+ }
+ out := cmp.Compare(second)
+ if types.IsUnknownOrError(out) {
+ return maybeSuffixError(out, "math.@min")
+ }
+ if out == types.IntOne {
+ return second
+ }
+ return first
+}
+
+func minList(numList ref.Val) ref.Val {
+ l := numList.(traits.Lister)
+ size := l.Size().(types.Int)
+ if size == types.IntZero {
+ return types.NewErr("math.@min(list) argument must not be empty")
+ }
+ min := l.Get(types.IntZero)
+ for i := types.IntOne; i < size; i++ {
+ min = minPair(min, l.Get(i))
+ }
+ switch min.Type() {
+ case types.IntType, types.DoubleType, types.UintType, types.UnknownType:
+ return min
+ default:
+ return types.NewErr("no such overload: math.@min")
+ }
+}
+
+func maxPair(first, second ref.Val) ref.Val {
+ cmp, ok := first.(traits.Comparer)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(first)
+ }
+ out := cmp.Compare(second)
+ if types.IsUnknownOrError(out) {
+ return maybeSuffixError(out, "math.@max")
+ }
+ if out == types.IntNegOne {
+ return second
+ }
+ return first
+}
+
+func maxList(numList ref.Val) ref.Val {
+ l := numList.(traits.Lister)
+ size := l.Size().(types.Int)
+ if size == types.IntZero {
+ return types.NewErr("math.@max(list) argument must not be empty")
+ }
+ max := l.Get(types.IntZero)
+ for i := types.IntOne; i < size; i++ {
+ max = maxPair(max, l.Get(i))
+ }
+ switch max.Type() {
+ case types.IntType, types.DoubleType, types.UintType, types.UnknownType:
+ return max
+ default:
+ return types.NewErr("no such overload: math.@max")
+ }
+}
+
+func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *common.Error {
+ for _, arg := range args {
+ err := checkInvalidArgLiteral(funcName, arg)
+ if err != nil {
+ return &common.Error{
+ Message: err.Error(),
+ Location: meh.OffsetLocation(arg.GetId()),
+ }
+ }
+ }
+ return nil
+}
+
+func checkInvalidArgLiteral(funcName string, arg *exprpb.Expr) error {
+ if !isValidArgType(arg) {
+ return fmt.Errorf("%s simple literal arguments must be numeric", funcName)
+ }
+ return nil
+}
+
+func isValidArgType(arg *exprpb.Expr) bool {
+ switch arg.GetExprKind().(type) {
+ case *exprpb.Expr_ConstExpr:
+ c := arg.GetConstExpr()
+ switch c.GetConstantKind().(type) {
+ case *exprpb.Constant_DoubleValue, *exprpb.Constant_Int64Value, *exprpb.Constant_Uint64Value:
+ return true
+ default:
+ return false
+ }
+ case *exprpb.Expr_ListExpr, *exprpb.Expr_StructExpr:
+ return false
+ default:
+ return true
+ }
+}
+
+func isListLiteralWithValidArgs(arg *exprpb.Expr) bool {
+ switch arg.GetExprKind().(type) {
+ case *exprpb.Expr_ListExpr:
+ list := arg.GetListExpr()
+ if len(list.GetElements()) == 0 {
+ return false
+ }
+ for _, e := range list.GetElements() {
+ if !isValidArgType(e) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func maybeSuffixError(val ref.Val, suffix string) ref.Val {
+ if types.IsError(val) {
+ msg := val.(*types.Err).String()
+ if !strings.Contains(msg, suffix) {
+ return types.NewErr("%s: %s", msg, suffix)
+ }
+ }
+ return val
+}
diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go
new file mode 100644
index 0000000000..acbc44b6d5
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/native.go
@@ -0,0 +1,574 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/checker/decls"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/pb"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+var (
+ nativeObjTraitMask = traits.FieldTesterType | traits.IndexerType
+ jsonValueType = reflect.TypeOf(&structpb.Value{})
+ jsonStructType = reflect.TypeOf(&structpb.Struct{})
+)
+
+// NativeTypes creates a type provider which uses reflect.Type and reflect.Value instances
+// to produce type definitions that can be used within CEL.
+//
+// All struct types in Go are exposed to CEL via their simple package name and struct type name:
+//
+// ```go
+// package identity
+//
+// type Account struct {
+// ID int
+// }
+//
+// ```
+//
+// The type `identity.Account` would be exported to CEL using the same qualified name, e.g.
+// `identity.Account{ID: 1234}` would create a new `Account` instance with the `ID` field
+// populated.
+//
+// Only exported fields are exposed via NativeTypes, and the type-mapping between Go and CEL
+// is as follows:
+//
+// | Go type | CEL type |
+// |-------------------------------------|-----------|
+// | bool | bool |
+// | []byte | bytes |
+// | float32, float64 | double |
+// | int, int8, int16, int32, int64 | int |
+// | string | string |
+// | uint, uint8, uint16, uint32, uint64 | uint |
+// | time.Duration | duration |
+// | time.Time | timestamp |
+// | array, slice | list |
+// | map | map |
+//
+// Please note, if you intend to configure support for proto messages in addition to native
+// types, you will need to provide the protobuf types before the golang native types. The
+// same advice holds if you are using custom type adapters and type providers. The native type
+// provider composes over whichever type adapter and provider is configured in the cel.Env at
+// the time that it is invoked.
+func NativeTypes(refTypes ...any) cel.EnvOption {
+ return func(env *cel.Env) (*cel.Env, error) {
+ tp, err := newNativeTypeProvider(env.TypeAdapter(), env.TypeProvider(), refTypes...)
+ if err != nil {
+ return nil, err
+ }
+ env, err = cel.CustomTypeAdapter(tp)(env)
+ if err != nil {
+ return nil, err
+ }
+ return cel.CustomTypeProvider(tp)(env)
+ }
+}
+
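
A minimal usage sketch, assuming an illustrative Account struct declared in package main (hence exposed to CEL as 'main.Account'); the variable name and expression are likewise illustrative:

    package main

    import (
    	"fmt"
    	"reflect"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    // Account is an illustrative native Go type registered with the provider.
    type Account struct {
    	ID    int64
    	Email string
    }

    func main() {
    	env, err := cel.NewEnv(
    		ext.NativeTypes(reflect.TypeOf(Account{})),
    		cel.Variable("account", cel.ObjectType("main.Account")),
    	)
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`account.Email.endsWith('@example.com') && account.ID > 0`)
    	if iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(map[string]any{
    		"account": Account{ID: 42, Email: "user@example.com"},
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Value()) // true
    }
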
+func newNativeTypeProvider(adapter ref.TypeAdapter, provider ref.TypeProvider, refTypes ...any) (*nativeTypeProvider, error) {
+ nativeTypes := make(map[string]*nativeType, len(refTypes))
+ for _, refType := range refTypes {
+ switch rt := refType.(type) {
+ case reflect.Type:
+ t, err := newNativeType(rt)
+ if err != nil {
+ return nil, err
+ }
+ nativeTypes[t.TypeName()] = t
+ case reflect.Value:
+ t, err := newNativeType(rt.Type())
+ if err != nil {
+ return nil, err
+ }
+ nativeTypes[t.TypeName()] = t
+ default:
+ return nil, fmt.Errorf("unsupported native type: %v (%T) must be reflect.Type or reflect.Value", rt, rt)
+ }
+ }
+ return &nativeTypeProvider{
+ nativeTypes: nativeTypes,
+ baseAdapter: adapter,
+ baseProvider: provider,
+ }, nil
+}
+
+type nativeTypeProvider struct {
+ nativeTypes map[string]*nativeType
+ baseAdapter ref.TypeAdapter
+ baseProvider ref.TypeProvider
+}
+
+// EnumValue proxies to the ref.TypeProvider configured at the time the NativeTypes
+// option was configured.
+func (tp *nativeTypeProvider) EnumValue(enumName string) ref.Val {
+ return tp.baseProvider.EnumValue(enumName)
+}
+
+// FindIdent looks up native type instances by qualified identifier, and if not found
+// proxies to the composed ref.TypeProvider.
+func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) {
+ if t, found := tp.nativeTypes[typeName]; found {
+ return t, true
+ }
+ return tp.baseProvider.FindIdent(typeName)
+}
+
+// FindType looks up CEL type-checker type definition by qualified identifier, and if not found
+// proxies to the composed ref.TypeProvider.
+func (tp *nativeTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
+ if _, found := tp.nativeTypes[typeName]; found {
+ return decls.NewTypeType(decls.NewObjectType(typeName)), true
+ }
+ return tp.baseProvider.FindType(typeName)
+}
+
+// FindFieldType looks up a native type's field definition, and if the type name is not a native
+// type then proxies to the composed ref.TypeProvider
+func (tp *nativeTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) {
+ t, found := tp.nativeTypes[typeName]
+ if !found {
+ return tp.baseProvider.FindFieldType(typeName, fieldName)
+ }
+ refField, isDefined := t.hasField(fieldName)
+ if !found || !isDefined {
+ return nil, false
+ }
+ exprType, ok := convertToExprType(refField.Type)
+ if !ok {
+ return nil, false
+ }
+ return &ref.FieldType{
+ Type: exprType,
+ IsSet: func(obj any) bool {
+ refVal := reflect.Indirect(reflect.ValueOf(obj))
+ refField := refVal.FieldByName(fieldName)
+ return !refField.IsZero()
+ },
+ GetFrom: func(obj any) (any, error) {
+ refVal := reflect.Indirect(reflect.ValueOf(obj))
+ refField := refVal.FieldByName(fieldName)
+ return getFieldValue(tp, refField), nil
+ },
+ }, true
+}
+
+// NewValue implements the ref.TypeProvider interface method.
+func (tp *nativeTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val {
+ t, found := tp.nativeTypes[typeName]
+ if !found {
+ return tp.baseProvider.NewValue(typeName, fields)
+ }
+ refPtr := reflect.New(t.refType)
+ refVal := refPtr.Elem()
+ for fieldName, val := range fields {
+ refFieldDef, isDefined := t.hasField(fieldName)
+ if !isDefined {
+ return types.NewErr("no such field: %s", fieldName)
+ }
+ fieldVal, err := val.ConvertToNative(refFieldDef.Type)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ refField := refVal.FieldByIndex(refFieldDef.Index)
+ refFieldVal := reflect.ValueOf(fieldVal)
+ refField.Set(refFieldVal)
+ }
+ return tp.NativeToValue(refPtr.Interface())
+}
+
+// NativeToValue adapts native values to CEL values and will proxy to the composed type adapter
+// for non-native types.
+func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val {
+ if val == nil {
+ return types.NullValue
+ }
+ if v, ok := val.(ref.Val); ok {
+ return v
+ }
+ rawVal := reflect.ValueOf(val)
+ refVal := rawVal
+ if refVal.Kind() == reflect.Ptr {
+ refVal = reflect.Indirect(refVal)
+ }
+ // This isn't quite right if you're also supporting proto,
+ // but maybe an acceptable limitation.
+ switch refVal.Kind() {
+ case reflect.Array, reflect.Slice:
+ switch val := val.(type) {
+ case []byte:
+ return tp.baseAdapter.NativeToValue(val)
+ default:
+ return types.NewDynamicList(tp, val)
+ }
+ case reflect.Map:
+ return types.NewDynamicMap(tp, val)
+ case reflect.Struct:
+ switch val := val.(type) {
+ case proto.Message, *pb.Map, protoreflect.List, protoreflect.Message, protoreflect.Value,
+ time.Time:
+ return tp.baseAdapter.NativeToValue(val)
+ default:
+ return newNativeObject(tp, val, rawVal)
+ }
+ default:
+ return tp.baseAdapter.NativeToValue(val)
+ }
+}
+
+// convertToExprType converts the Golang reflect.Type to a protobuf exprpb.Type.
+func convertToExprType(refType reflect.Type) (*exprpb.Type, bool) {
+ switch refType.Kind() {
+ case reflect.Bool:
+ return decls.Bool, true
+ case reflect.Float32, reflect.Float64:
+ return decls.Double, true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if refType == durationType {
+ return decls.Duration, true
+ }
+ return decls.Int, true
+ case reflect.String:
+ return decls.String, true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return decls.Uint, true
+ case reflect.Array, reflect.Slice:
+ refElem := refType.Elem()
+ if refElem == reflect.TypeOf(byte(0)) {
+ return decls.Bytes, true
+ }
+ elemType, ok := convertToExprType(refElem)
+ if !ok {
+ return nil, false
+ }
+ return decls.NewListType(elemType), true
+ case reflect.Map:
+ keyType, ok := convertToExprType(refType.Key())
+ if !ok {
+ return nil, false
+ }
+ // Ensure the key type is an int, bool, uint, or string.
+ elemType, ok := convertToExprType(refType.Elem())
+ if !ok {
+ return nil, false
+ }
+ return decls.NewMapType(keyType, elemType), true
+ case reflect.Struct:
+ if refType == timestampType {
+ return decls.Timestamp, true
+ }
+ return decls.NewObjectType(
+ fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()),
+ ), true
+ case reflect.Pointer:
+ if refType.Implements(pbMsgInterfaceType) {
+ pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage)
+ return decls.NewObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true
+ }
+ return convertToExprType(refType.Elem())
+ }
+ return nil, false
+}
+
+func newNativeObject(adapter ref.TypeAdapter, val any, refValue reflect.Value) ref.Val {
+ valType, err := newNativeType(refValue.Type())
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return &nativeObj{
+ TypeAdapter: adapter,
+ val: val,
+ valType: valType,
+ refValue: refValue,
+ }
+}
+
+type nativeObj struct {
+ ref.TypeAdapter
+ val any
+ valType *nativeType
+ refValue reflect.Value
+}
+
+// ConvertToNative implements the ref.Val interface method.
+//
+// CEL does not have a notion of pointers, so whether a field is a pointer or value
+// is handled as part of this conversion step.
+func (o *nativeObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ if o.refValue.Type() == typeDesc {
+ return o.val, nil
+ }
+ if o.refValue.Kind() == reflect.Pointer && o.refValue.Type().Elem() == typeDesc {
+ return o.refValue.Elem().Interface(), nil
+ }
+ if typeDesc.Kind() == reflect.Pointer && o.refValue.Type() == typeDesc.Elem() {
+ ptr := reflect.New(typeDesc.Elem())
+ ptr.Elem().Set(o.refValue)
+ return ptr.Interface(), nil
+ }
+ switch typeDesc {
+ case jsonValueType:
+ jsonStruct, err := o.ConvertToNative(jsonStructType)
+ if err != nil {
+ return nil, err
+ }
+ return structpb.NewStructValue(jsonStruct.(*structpb.Struct)), nil
+ case jsonStructType:
+ refVal := reflect.Indirect(o.refValue)
+ refType := refVal.Type()
+ fields := make(map[string]*structpb.Value, refVal.NumField())
+ for i := 0; i < refVal.NumField(); i++ {
+ fieldType := refType.Field(i)
+ fieldValue := refVal.Field(i)
+ if !fieldValue.IsValid() || fieldValue.IsZero() {
+ continue
+ }
+ fieldCELVal := o.NativeToValue(fieldValue.Interface())
+ fieldJSONVal, err := fieldCELVal.ConvertToNative(jsonValueType)
+ if err != nil {
+ return nil, err
+ }
+ fields[fieldType.Name] = fieldJSONVal.(*structpb.Value)
+ }
+ return &structpb.Struct{Fields: fields}, nil
+ }
+ return nil, fmt.Errorf("type conversion error from '%v' to '%v'", o.Type(), typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (o *nativeObj) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case types.TypeType:
+ return o.valType
+ default:
+ if typeVal.TypeName() == o.valType.typeName {
+ return o
+ }
+ }
+ return types.NewErr("type conversion error from '%s' to '%s'", o.Type(), typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+//
+// Note, that in Golang a pointer to a value is not equal to the value it contains.
+// In CEL pointers and values to which they point are equal.
+func (o *nativeObj) Equal(other ref.Val) ref.Val {
+ otherNtv, ok := other.(*nativeObj)
+ if !ok {
+ return types.False
+ }
+ val := o.val
+ otherVal := otherNtv.val
+ refVal := o.refValue
+ otherRefVal := otherNtv.refValue
+ if refVal.Kind() != otherRefVal.Kind() {
+ if refVal.Kind() == reflect.Pointer {
+ val = refVal.Elem().Interface()
+ } else if otherRefVal.Kind() == reflect.Pointer {
+ otherVal = otherRefVal.Elem().Interface()
+ }
+ }
+ return types.Bool(reflect.DeepEqual(val, otherVal))
+}
+
+// IsZeroValue indicates whether the contained Golang value is a zero value.
+//
+// Golang largely follows proto3 semantics for zero values.
+func (o *nativeObj) IsZeroValue() bool {
+ return reflect.Indirect(o.refValue).IsZero()
+}
+
+// IsSet tests whether a field which is defined is set to a non-default value.
+func (o *nativeObj) IsSet(field ref.Val) ref.Val {
+ refField, refErr := o.getReflectedField(field)
+ if refErr != nil {
+ return refErr
+ }
+ return types.Bool(!refField.IsZero())
+}
+
+// Get returns the value of a field name.
+func (o *nativeObj) Get(field ref.Val) ref.Val {
+ refField, refErr := o.getReflectedField(field)
+ if refErr != nil {
+ return refErr
+ }
+ return adaptFieldValue(o, refField)
+}
+
+func (o *nativeObj) getReflectedField(field ref.Val) (reflect.Value, ref.Val) {
+ fieldName, ok := field.(types.String)
+ if !ok {
+ return reflect.Value{}, types.MaybeNoSuchOverloadErr(field)
+ }
+ fieldNameStr := string(fieldName)
+ refField, isDefined := o.valType.hasField(fieldNameStr)
+ if !isDefined {
+ return reflect.Value{}, types.NewErr("no such field: %s", fieldName)
+ }
+ refVal := reflect.Indirect(o.refValue)
+ return refVal.FieldByIndex(refField.Index), nil
+}
+
+// Type implements the ref.Val interface method.
+func (o *nativeObj) Type() ref.Type {
+ return o.valType
+}
+
+// Value implements the ref.Val interface method.
+func (o *nativeObj) Value() any {
+ return o.val
+}
+
+func newNativeType(rawType reflect.Type) (*nativeType, error) {
+ refType := rawType
+ if refType.Kind() == reflect.Pointer {
+ refType = refType.Elem()
+ }
+ if !isValidObjectType(refType) {
+ return nil, fmt.Errorf("unsupported reflect.Type %v, must be reflect.Struct", rawType)
+ }
+ return &nativeType{
+ typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()),
+ refType: refType,
+ }, nil
+}
+
+type nativeType struct {
+ typeName string
+ refType reflect.Type
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (t *nativeType) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("type conversion error for type to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t *nativeType) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case types.TypeType:
+ return types.TypeType
+ }
+ return types.NewErr("type conversion error from '%s' to '%s'", types.TypeType, typeVal)
+}
+
+// Equal returns true if both type names are equal to each other.
+func (t *nativeType) Equal(other ref.Val) ref.Val {
+ otherType, ok := other.(ref.Type)
+ return types.Bool(ok && t.TypeName() == otherType.TypeName())
+}
+
+// HasTrait implements the ref.Type interface method.
+func (t *nativeType) HasTrait(trait int) bool {
+ return nativeObjTraitMask&trait == trait
+}
+
+// String implements the fmt.Stringer interface method.
+func (t *nativeType) String() string {
+ return t.typeName
+}
+
+// Type implements the ref.Val interface method.
+func (t *nativeType) Type() ref.Type {
+ return types.TypeType
+}
+
+// TypeName implements the ref.Type interface method.
+func (t *nativeType) TypeName() string {
+ return t.typeName
+}
+
+// Value implements the ref.Val interface method.
+func (t *nativeType) Value() any {
+ return t.typeName
+}
+
+// hasField returns whether a field name has a corresponding Golang reflect.StructField
+func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) {
+ f, found := t.refType.FieldByName(fieldName)
+ if !found || !f.IsExported() || !isSupportedType(f.Type) {
+ return reflect.StructField{}, false
+ }
+ return f, true
+}
+
+func adaptFieldValue(adapter ref.TypeAdapter, refField reflect.Value) ref.Val {
+ return adapter.NativeToValue(getFieldValue(adapter, refField))
+}
+
+func getFieldValue(adapter ref.TypeAdapter, refField reflect.Value) any {
+ if refField.IsZero() {
+ switch refField.Kind() {
+ case reflect.Array, reflect.Slice:
+ return types.NewDynamicList(adapter, []ref.Val{})
+ case reflect.Map:
+ return types.NewDynamicMap(adapter, map[ref.Val]ref.Val{})
+ case reflect.Struct:
+ if refField.Type() == timestampType {
+ return types.Timestamp{Time: time.Unix(0, 0)}
+ }
+ return reflect.New(refField.Type()).Elem().Interface()
+ case reflect.Pointer:
+ return reflect.New(refField.Type().Elem()).Interface()
+ }
+ }
+ return refField.Interface()
+}
+
+func simplePkgAlias(pkgPath string) string {
+ paths := strings.Split(pkgPath, "/")
+ if len(paths) == 0 {
+ return ""
+ }
+ return paths[len(paths)-1]
+}
+
+func isValidObjectType(refType reflect.Type) bool {
+ return refType.Kind() == reflect.Struct
+}
+
+func isSupportedType(refType reflect.Type) bool {
+ switch refType.Kind() {
+ case reflect.Chan, reflect.Complex64, reflect.Complex128, reflect.Func, reflect.UnsafePointer, reflect.Uintptr:
+ return false
+ case reflect.Array, reflect.Slice:
+ return isSupportedType(refType.Elem())
+ case reflect.Map:
+ return isSupportedType(refType.Key()) && isSupportedType(refType.Elem())
+ }
+ return true
+}
+
+var (
+ pbMsgInterfaceType = reflect.TypeOf((*protoreflect.ProtoMessage)(nil)).Elem()
+ timestampType = reflect.TypeOf(time.Now())
+ durationType = reflect.TypeOf(time.Nanosecond)
+)
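For orientation, the native-type adapter vendored above is what backs cel-go's ext.NativeTypes environment option. Below is a minimal, self-contained sketch of exposing a plain Go struct to a CEL expression; the Disk struct, variable name, and expression are illustrative assumptions rather than anything taken from this patch.

    package main

    import (
    	"fmt"
    	"reflect"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    // Disk is a hypothetical struct surfaced to CEL through the native type provider.
    type Disk struct {
    	Name   string
    	SizeGB int64
    }

    func main() {
    	env, err := cel.NewEnv(
    		ext.NativeTypes(reflect.TypeOf(Disk{})),           // register the struct's exported fields
    		cel.Variable("disk", cel.ObjectType("main.Disk")), // type name is <pkg alias>.<struct name>
    	)
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`disk.SizeGB >= 10 && disk.Name != ''`)
    	if iss != nil && iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, _ := env.Program(ast)
    	out, _, _ := prg.Eval(map[string]any{"disk": Disk{Name: "data", SizeGB: 64}})
    	fmt.Println(out) // true
    }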
diff --git a/vendor/github.com/google/cel-go/ext/protos.go b/vendor/github.com/google/cel-go/ext/protos.go
new file mode 100644
index 0000000000..b905e710c1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/protos.go
@@ -0,0 +1,145 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Protos returns a cel.EnvOption to configure extended macros and functions for
+// proto manipulation.
+//
+// Note, all macros use the 'proto' namespace; however, at the time of macro
+// expansion the namespace looks just like any other identifier. If you are
+// currently using a variable named 'proto', the macro will likely work just as
+// intended; however, there is some chance for collision.
+//
+// # Protos.GetExt
+//
+// Macro which generates a select expression that retrieves an extension field
+// from the input proto2 syntax message. If the field is not set, the default
+// value for the extension field is returned according to safe-traversal semantics.
+//
+// proto.getExt(<msg>, <fully.qualified.extension.name>) -> <field-type>
+//
+// Examples:
+//
+// proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value
+//
+// # Protos.HasExt
+//
+// Macro which generates a test-only select expression that determines whether
+// an extension field is set on a proto2 syntax message.
+//
+// proto.hasExt(<msg>, <fully.qualified.extension.name>) -> <bool>
+//
+// Examples:
+//
+// proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
+func Protos() cel.EnvOption {
+ return cel.Lib(protoLib{})
+}
+
+var (
+ protoNamespace = "proto"
+ hasExtension = "hasExt"
+ getExtension = "getExt"
+)
+
+type protoLib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (protoLib) LibraryName() string {
+ return "cel.lib.ext.protos"
+}
+
+// CompileOptions implements the Library interface method.
+func (protoLib) CompileOptions() []cel.EnvOption {
+ return []cel.EnvOption{
+ cel.Macros(
+ // proto.getExt(msg, select_expression)
+ cel.NewReceiverMacro(getExtension, 2, getProtoExt),
+ // proto.hasExt(msg, select_expression)
+ cel.NewReceiverMacro(hasExtension, 2, hasProtoExt),
+ ),
+ }
+}
+
+// ProgramOptions implements the Library interface method.
+func (protoLib) ProgramOptions() []cel.ProgramOption {
+ return []cel.ProgramOption{}
+}
+
+// hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message.
+func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(protoNamespace, target) {
+ return nil, nil
+ }
+ extensionField, err := getExtFieldName(meh, args[1])
+ if err != nil {
+ return nil, err
+ }
+ return meh.PresenceTest(args[0], extensionField), nil
+}
+
+// getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message.
+func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
+ if !macroTargetMatchesNamespace(protoNamespace, target) {
+ return nil, nil
+ }
+ extFieldName, err := getExtFieldName(meh, args[1])
+ if err != nil {
+ return nil, err
+ }
+ return meh.Select(args[0], extFieldName), nil
+}
+
+func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *common.Error) {
+ isValid := false
+ extensionField := ""
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_SelectExpr:
+ extensionField, isValid = validateIdentifier(expr)
+ }
+ if !isValid {
+ return "", &common.Error{
+ Message: "invalid extension field",
+ Location: meh.OffsetLocation(expr.GetId()),
+ }
+ }
+ return extensionField, nil
+}
+
+func validateIdentifier(expr *exprpb.Expr) (string, bool) {
+ switch expr.GetExprKind().(type) {
+ case *exprpb.Expr_IdentExpr:
+ return expr.GetIdentExpr().GetName(), true
+ case *exprpb.Expr_SelectExpr:
+ sel := expr.GetSelectExpr()
+ if sel.GetTestOnly() {
+ return "", false
+ }
+ opStr, isIdent := validateIdentifier(sel.GetOperand())
+ if !isIdent {
+ return "", false
+ }
+ return opStr + "." + sel.GetField(), true
+ default:
+ return "", false
+ }
+}
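As a hedged sketch of how the proto library above is typically wired up: the proto2pb test package and the int32_ext extension mirror the documentation example and are assumptions, not part of this change.

    env, err := cel.NewEnv(
    	ext.Protos(),
    	cel.Types(&proto2pb.TestAllTypes{}), // e.g. github.com/google/cel-go/test/proto2pb (assumed)
    	cel.Variable("msg", cel.ObjectType("google.expr.proto2.test.TestAllTypes")),
    )
    if err != nil {
    	panic(err)
    }
    // hasExt guards the read; getExt falls back to the extension's default when unset.
    ast, iss := env.Compile(`proto.hasExt(msg, google.expr.proto2.test.int32_ext)
    	? proto.getExt(msg, google.expr.proto2.test.int32_ext) : 0`)
    if iss != nil && iss.Err() != nil {
    	panic(iss.Err())
    }
    _ = ast // build a cel.Program and evaluate with a bound 'msg' as usual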
diff --git a/vendor/github.com/google/cel-go/ext/sets.go b/vendor/github.com/google/cel-go/ext/sets.go
new file mode 100644
index 0000000000..4820d6199e
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/sets.go
@@ -0,0 +1,138 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+)
+
+// Sets returns a cel.EnvOption to configure namespaced set relationship
+// functions.
+//
+// There is no set type within CEL, and while one may be introduced in the
+// future, there are cases where a `list` type is known to behave like a set.
+// For such cases, this library provides some basic functionality for
+// determining set containment, equivalence, and intersection.
+//
+// # Sets.Contains
+//
+// Returns whether the first list argument contains all elements in the second
+// list argument. The list may contain elements of any type and standard CEL
+// equality is used to determine whether a value exists in both lists. If the
+// second list is empty, the result will always be true.
+//
+// sets.contains(list(T), list(T)) -> bool
+//
+// Examples:
+//
+// sets.contains([], []) // true
+// sets.contains([], [1]) // false
+// sets.contains([1, 2, 3, 4], [2, 3]) // true
+// sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true
+//
+// # Sets.Equivalent
+//
+// Returns whether the first and second list are set equivalent. Lists are set
+// equivalent if for every item in the first list, there is an element in the
+// second which is equal. The lists may not be of the same size as they do not
+// guarantee the elements within them are unique, so size does not factor into
+// the computation.
+//
+// Examples:
+//
+// sets.equivalent([], []) // true
+// sets.equivalent([1], [1, 1]) // true
+// sets.equivalent([1], [1u, 1.0]) // true
+// sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true
+//
+// # Sets.Intersects
+//
+// Returns whether the first list has at least one element whose value is equal
+// to an element in the second list. If either list is empty, the result will
+// be false.
+//
+// Examples:
+//
+// sets.intersects([1], []) // false
+// sets.intersects([1], [1, 2]) // true
+// sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true
+func Sets() cel.EnvOption {
+ return cel.Lib(setsLib{})
+}
+
+type setsLib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (setsLib) LibraryName() string {
+ return "cel.lib.ext.sets"
+}
+
+// CompileOptions implements the Library interface method.
+func (setsLib) CompileOptions() []cel.EnvOption {
+ listType := cel.ListType(cel.TypeParamType("T"))
+ return []cel.EnvOption{
+ cel.Function("sets.contains",
+ cel.Overload("list_sets_contains_list", []*cel.Type{listType, listType}, cel.BoolType,
+ cel.BinaryBinding(setsContains))),
+ cel.Function("sets.equivalent",
+ cel.Overload("list_sets_equivalent_list", []*cel.Type{listType, listType}, cel.BoolType,
+ cel.BinaryBinding(setsEquivalent))),
+ cel.Function("sets.intersects",
+ cel.Overload("list_sets_intersects_list", []*cel.Type{listType, listType}, cel.BoolType,
+ cel.BinaryBinding(setsIntersects))),
+ }
+}
+
+// ProgramOptions implements the Library interface method.
+func (setsLib) ProgramOptions() []cel.ProgramOption {
+ return []cel.ProgramOption{}
+}
+
+func setsIntersects(listA, listB ref.Val) ref.Val {
+ lA := listA.(traits.Lister)
+ lB := listB.(traits.Lister)
+ it := lA.Iterator()
+ for it.HasNext() == types.True {
+ exists := lB.Contains(it.Next())
+ if exists == types.True {
+ return types.True
+ }
+ }
+ return types.False
+}
+
+func setsContains(list, sublist ref.Val) ref.Val {
+ l := list.(traits.Lister)
+ sub := sublist.(traits.Lister)
+ it := sub.Iterator()
+ for it.HasNext() == types.True {
+ exists := l.Contains(it.Next())
+ if exists != types.True {
+ return exists
+ }
+ }
+ return types.True
+}
+
+func setsEquivalent(listA, listB ref.Val) ref.Val {
+ aContainsB := setsContains(listA, listB)
+ if aContainsB != types.True {
+ return aContainsB
+ }
+ return setsContains(listB, listA)
+}
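A minimal end-to-end sketch of consuming the set helpers above, reusing the documented sample expressions; the surrounding setup is ordinary cel-go boilerplate rather than anything specific to this patch.

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/cel"
    	"github.com/google/cel-go/ext"
    )

    func main() {
    	env, err := cel.NewEnv(ext.Sets())
    	if err != nil {
    		panic(err)
    	}
    	ast, iss := env.Compile(`sets.contains([1, 2, 3, 4], [2, 3]) && sets.intersects([1], [1, 2])`)
    	if iss != nil && iss.Err() != nil {
    		panic(iss.Err())
    	}
    	prg, err := env.Program(ast)
    	if err != nil {
    		panic(err)
    	}
    	out, _, err := prg.Eval(cel.NoVars())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out) // true
    }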
diff --git a/vendor/github.com/google/cel-go/ext/strings.go b/vendor/github.com/google/cel-go/ext/strings.go
index 6ce239ac2b..8455d58290 100644
--- a/vendor/github.com/google/cel-go/ext/strings.go
+++ b/vendor/github.com/google/cel-go/ext/strings.go
@@ -19,32 +19,92 @@ package ext
import (
"fmt"
+ "math"
"reflect"
+ "sort"
"strings"
"unicode"
+ "unicode/utf8"
+
+ "golang.org/x/text/language"
+ "golang.org/x/text/message"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+ "github.com/google/cel-go/interpreter"
+)
+
+const (
+ defaultLocale = "en-US"
+ defaultPrecision = 6
)
// Strings returns a cel.EnvOption to configure extended functions for string manipulation.
// As a general note, all indices are zero-based.
//
-// CharAt
+// # CharAt
//
// Returns the character at the given position. If the position is negative, or greater than
// the length of the string, the function will produce an error:
//
-// <string>.charAt(<int>) -> <string>
+// <string>.charAt(<int>) -> <string>
//
// Examples:
//
-// 'hello'.charAt(4) // return 'o'
-// 'hello'.charAt(5) // return ''
-// 'hello'.charAt(-1) // error
+// 'hello'.charAt(4) // return 'o'
+// 'hello'.charAt(5) // return ''
+// 'hello'.charAt(-1) // error
+//
+// # Format
+//
+// Introduced at version: 1
+//
+// Returns a new string with substitutions being performed, printf-style.
+// The valid formatting clauses are:
+//
+// `%s` - substitutes a string. This can also be used on bools, lists, maps, bytes,
+// Duration and Timestamp, in addition to all numerical types (int, uint, and double).
+// Note that the dot/period decimal separator will always be used when printing a list
+// or map that contains a double, and that null can be passed (which results in the
+// string "null") in addition to types.
+// `%d` - substitutes an integer.
+// `%f` - substitutes a double with fixed-point precision. The default precision is 6, but
+// this can be adjusted. The strings `Infinity`, `-Infinity`, and `NaN` are also valid input
+// for this clause.
+// `%e` - substitutes a double in scientific notation. The default precision is 6, but this
+// can be adjusted.
+// `%b` - substitutes an integer with its equivalent binary string. Can also be used on bools.
+// `%x` - substitutes an integer with its equivalent in hexadecimal, or if given a string or
+// bytes, will output each character's equivalent in hexadecimal.
+// `%X` - same as above, but with A-F capitalized.
+// `%o` - substitutes an integer with its equivalent in octal.
+//
+// <string>.format(<list>) -> <string>
+//
+// Examples:
//
-// IndexOf
+// "this is a string: %s\nand an integer: %d".format(["str", 42]) // returns "this is a string: str\nand an integer: 42"
+// "a double substituted with %%s: %s".format([64.2]) // returns "a double substituted with %s: 64.2"
+// "string type: %s".format([type(string)]) // returns "string type: string"
+// "timestamp: %s".format([timestamp("2023-02-03T23:31:20+00:00")]) // returns "timestamp: 2023-02-03T23:31:20Z"
+// "duration: %s".format([duration("1h45m47s")]) // returns "duration: 6347s"
+// "%f".format([3.14]) // returns "3.140000"
+// "scientific notation: %e".format([2.71828]) // returns "scientific notation: 2.718280\u202f\u00d7\u202f10\u2070\u2070"
+// "5 in binary: %b".format([5]), // returns "5 in binary; 101"
+// "26 in hex: %x".format([26]), // returns "26 in hex: 1a"
+// "26 in hex (uppercase): %X".format([26]) // returns "26 in hex (uppercase): 1A"
+// "30 in octal: %o".format([30]) // returns "30 in octal: 36"
+// "a map inside a list: %s".format([[1, 2, 3, {"a": "x", "b": "y", "c": "z"}]]) // returns "a map inside a list: [1, 2, 3, {"a":"x", "b":"y", "c":"d"}]"
+// "true bool: %s - false bool: %s\nbinary bool: %b".format([true, false, true]) // returns "true bool: true - false bool: false\nbinary bool: 1"
+//
+// Passing an incorrect type (an integer to `%s`) is considered an error, as well as attempting
+// to use more formatting clauses than there are arguments (`%d %d %d` while passing two ints, for instance).
+// If compile-time checking is enabled, and the formatting string is a constant, and the argument list is a literal,
+// then letting any arguments go unused/unformatted is also considered an error.
+//
+// # IndexOf
//
// Returns the integer index of the first occurrence of the search string. If the search string is
// not found the function returns -1.
@@ -52,19 +112,19 @@ import (
// The function also accepts an optional position from which to begin the substring search. If the
// substring is the empty string, the index where the search starts is returned (zero or custom).
//
-// <string>.indexOf(<string>) -> <int>
-// <string>.indexOf(<string>, <int>) -> <int>
+// <string>.indexOf(<string>) -> <int>
+// <string>.indexOf(<string>, <int>) -> <int>
//
// Examples:
//
-// 'hello mellow'.indexOf('') // returns 0
-// 'hello mellow'.indexOf('ello') // returns 1
-// 'hello mellow'.indexOf('jello') // returns -1
-// 'hello mellow'.indexOf('', 2) // returns 2
-// 'hello mellow'.indexOf('ello', 2) // returns 7
-// 'hello mellow'.indexOf('ello', 20) // error
+// 'hello mellow'.indexOf('') // returns 0
+// 'hello mellow'.indexOf('ello') // returns 1
+// 'hello mellow'.indexOf('jello') // returns -1
+// 'hello mellow'.indexOf('', 2) // returns 2
+// 'hello mellow'.indexOf('ello', 2) // returns 7
+// 'hello mellow'.indexOf('ello', 20) // error
//
-// Join
+// # Join
//
// Returns a new string where the elements of string list are concatenated.
//
@@ -75,12 +135,12 @@ import (
//
// Examples:
//
-// ['hello', 'mellow'].join() // returns 'hellomellow'
-// ['hello', 'mellow'].join(' ') // returns 'hello mellow'
-// [].join() // returns ''
-// [].join('/') // returns ''
+// ['hello', 'mellow'].join() // returns 'hellomellow'
+// ['hello', 'mellow'].join(' ') // returns 'hello mellow'
+// [].join() // returns ''
+// [].join('/') // returns ''
//
-// LastIndexOf
+// # LastIndexOf
//
// Returns the integer index at the start of the last occurrence of the search string. If the
// search string is not found the function returns -1.
@@ -89,31 +149,45 @@ import (
// considered as the beginning of the substring match. If the substring is the empty string,
// the index where the search starts is returned (string length or custom).
//
-// <string>.lastIndexOf(<string>) -> <int>
-// <string>.lastIndexOf(<string>, <int>) -> <int>
+// <string>.lastIndexOf(<string>) -> <int>
+// <string>.lastIndexOf(<string>, <int>) -> <int>
//
// Examples:
//
-// 'hello mellow'.lastIndexOf('') // returns 12
-// 'hello mellow'.lastIndexOf('ello') // returns 7
-// 'hello mellow'.lastIndexOf('jello') // returns -1
-// 'hello mellow'.lastIndexOf('ello', 6) // returns 1
-// 'hello mellow'.lastIndexOf('ello', -1) // error
+// 'hello mellow'.lastIndexOf('') // returns 12
+// 'hello mellow'.lastIndexOf('ello') // returns 7
+// 'hello mellow'.lastIndexOf('jello') // returns -1
+// 'hello mellow'.lastIndexOf('ello', 6) // returns 1
+// 'hello mellow'.lastIndexOf('ello', -1) // error
//
-// LowerAscii
+// # LowerAscii
//
// Returns a new string where all ASCII characters are lower-cased.
//
// This function does not perform Unicode case-mapping for characters outside the ASCII range.
//
-// <string>.lowerAscii() -> <string>
+// <string>.lowerAscii() -> <string>
+//
+// Examples:
+//
+// 'TacoCat'.lowerAscii() // returns 'tacocat'
+// 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+//
+// # Quote
+//
+// Introduced in version: 1
+//
+// Takes the given string and makes it safe to print (without any formatting due to escape sequences).
+// If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD.
+//
+// strings.quote(<string>)
//
// Examples:
//
-// 'TacoCat'.lowerAscii() // returns 'tacocat'
-// 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+// strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""'
+// strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"'
//
-// Replace
+// # Replace
//
// Returns a new string based on the target, which replaces the occurrences of a search string
// with a replacement string if present. The function accepts an optional limit on the number of
@@ -122,17 +196,17 @@ import (
// When the replacement limit is 0, the result is the original string. When the limit is a negative
// number, the function behaves the same as replace all.
//
-// <string>.replace(<string>, <string>) -> <string>
-// <string>.replace(<string>, <string>, <int>) -> <string>
+// <string>.replace(<string>, <string>) -> <string>
+// <string>.replace(<string>, <string>, <int>) -> <string>
//
// Examples:
//
-// 'hello hello'.replace('he', 'we') // returns 'wello wello'
-// 'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
-// 'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
-// 'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
+// 'hello hello'.replace('he', 'we') // returns 'wello wello'
+// 'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
+// 'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
+// 'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
//
-// Split
+// # Split
//
// Returns a list of strings split from the input by the given separator. The function accepts
// an optional argument specifying a limit on the number of substrings produced by the split.
@@ -141,18 +215,18 @@ import (
// target string to split. When the limit is a negative number, the function behaves the same as
// split all.
//
-// <string>.split(<string>) -> <list<string>>
-// <string>.split(<string>, <int>) -> <list<string>>
+// <string>.split(<string>) -> <list<string>>
+// <string>.split(<string>, <int>) -> <list<string>>
//
// Examples:
//
-// 'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
-// 'hello hello hello'.split(' ', 0) // returns []
-// 'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
-// 'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
-// 'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
+// 'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
+// 'hello hello hello'.split(' ', 0) // returns []
+// 'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
+// 'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
+// 'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
//
-// Substring
+// # Substring
//
// Returns the substring given a numeric range corresponding to character positions. Optionally
// may omit the trailing range for a substring from a given character position until the end of
@@ -162,48 +236,102 @@ import (
// error to specify an end range that is lower than the start range, or for either the start or end
// index to be negative or exceed the string length.
//
-// <string>.substring(<int>) -> <string>
-// <string>.substring(<int>, <int>) -> <string>
+// <string>.substring(<int>) -> <string>
+// <string>.substring(<int>, <int>) -> <string>
//
// Examples:
//
-// 'tacocat'.substring(4) // returns 'cat'
-// 'tacocat'.substring(0, 4) // returns 'taco'
-// 'tacocat'.substring(-1) // error
-// 'tacocat'.substring(2, 1) // error
+// 'tacocat'.substring(4) // returns 'cat'
+// 'tacocat'.substring(0, 4) // returns 'taco'
+// 'tacocat'.substring(-1) // error
+// 'tacocat'.substring(2, 1) // error
//
-// Trim
+// # Trim
//
// Returns a new string which removes the leading and trailing whitespace in the target string.
// The trim function uses the Unicode definition of whitespace which does not include the
// zero-width spaces. See: https://en.wikipedia.org/wiki/Whitespace_character#Unicode
//
-// <string>.trim() -> <string>
+// <string>.trim() -> <string>
//
// Examples:
//
-// ' \ttrim\n '.trim() // returns 'trim'
+// ' \ttrim\n '.trim() // returns 'trim'
//
-// UpperAscii
+// # UpperAscii
//
// Returns a new string where all ASCII characters are upper-cased.
//
// This function does not perform Unicode case-mapping for characters outside the ASCII range.
//
-// <string>.upperAscii() -> <string>
+// <string>.upperAscii() -> <string>
//
// Examples:
//
-// 'TacoCat'.upperAscii() // returns 'TACOCAT'
-// 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
-func Strings() cel.EnvOption {
- return cel.Lib(stringLib{})
+// 'TacoCat'.upperAscii() // returns 'TACOCAT'
+// 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
+func Strings(options ...StringsOption) cel.EnvOption {
+ s := &stringLib{version: math.MaxUint32}
+ for _, o := range options {
+ s = o(s)
+ }
+ return cel.Lib(s)
+}
+
+type stringLib struct {
+ locale string
+ version uint32
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (*stringLib) LibraryName() string {
+ return "cel.lib.ext.strings"
}
-type stringLib struct{}
+// StringsOption is a functional interface for configuring the strings library.
+type StringsOption func(*stringLib) *stringLib
-func (stringLib) CompileOptions() []cel.EnvOption {
- return []cel.EnvOption{
+// StringsLocale configures the library with the given locale. The locale tag will
+// be checked for validity at the time that EnvOptions are configured. If this option
+// is not passed, string.format will behave as if en_US was passed as the locale.
+func StringsLocale(locale string) StringsOption {
+ return func(sl *stringLib) *stringLib {
+ sl.locale = locale
+ return sl
+ }
+}
+
+// StringsVersion configures the version of the string library. The version limits which
+// functions are available. Only functions introduced at or below the given
+// version are included in the library. See the library documentation to determine
+// which version a function was introduced at. If the documentation does not
+// state which version a function was introduced at, it can be assumed to be
+// introduced at version 0, when the library was first created.
+// If this option is not set, all functions are available.
+func StringsVersion(version uint32) func(lib *stringLib) *stringLib {
+ return func(sl *stringLib) *stringLib {
+ sl.version = version
+ return sl
+ }
+}
+
+// CompileOptions implements the Library interface method.
+func (sl *stringLib) CompileOptions() []cel.EnvOption {
+ formatLocale := "en_US"
+ if sl.locale != "" {
+ // ensure locale is properly-formed if set
+ _, err := language.Parse(sl.locale)
+ if err != nil {
+ return []cel.EnvOption{
+ func(e *cel.Env) (*cel.Env, error) {
+ return nil, fmt.Errorf("failed to parse locale: %w", err)
+ },
+ }
+ }
+ formatLocale = sl.locale
+ }
+
+ opts := []cel.EnvOption{
cel.Function("charAt",
cel.MemberOverload("string_char_at_int", []*cel.Type{cel.StringType, cel.IntType}, cel.StringType,
cel.BinaryBinding(func(str, ind ref.Val) ref.Val {
@@ -303,28 +431,64 @@ func (stringLib) CompileOptions() []cel.EnvOption {
s := str.(types.String)
return stringOrError(upperASCII(string(s)))
}))),
- cel.Function("join",
- cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType,
- cel.UnaryBinding(func(list ref.Val) ref.Val {
- l, err := list.ConvertToNative(stringListType)
- if err != nil {
- return types.NewErr(err.Error())
- }
- return stringOrError(join(l.([]string)))
- })),
- cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType,
- cel.BinaryBinding(func(list, delim ref.Val) ref.Val {
- l, err := list.ConvertToNative(stringListType)
- if err != nil {
- return types.NewErr(err.Error())
- }
- d := delim.(types.String)
- return stringOrError(joinSeparator(l.([]string), string(d)))
+ }
+ if sl.version >= 1 {
+ opts = append(opts, cel.Function("format",
+ cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType,
+ cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+ s := string(args[0].(types.String))
+ formatArgs := args[1].(traits.Lister)
+ return stringOrError(interpreter.ParseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale))
}))),
+ cel.Function("strings.quote", cel.Overload("strings_quote", []*cel.Type{cel.StringType}, cel.StringType,
+ cel.UnaryBinding(func(str ref.Val) ref.Val {
+ s := str.(types.String)
+ return stringOrError(quote(string(s)))
+ }))))
+
+ }
+ if sl.version >= 2 {
+ opts = append(opts,
+ cel.Function("join",
+ cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType,
+ cel.UnaryBinding(func(list ref.Val) ref.Val {
+ l := list.(traits.Lister)
+ return stringOrError(joinValSeparator(l, ""))
+ })),
+ cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType,
+ cel.BinaryBinding(func(list, delim ref.Val) ref.Val {
+ l := list.(traits.Lister)
+ d := delim.(types.String)
+ return stringOrError(joinValSeparator(l, string(d)))
+ }))),
+ )
+ } else {
+ opts = append(opts,
+ cel.Function("join",
+ cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType,
+ cel.UnaryBinding(func(list ref.Val) ref.Val {
+ l, err := list.ConvertToNative(stringListType)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ return stringOrError(join(l.([]string)))
+ })),
+ cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType,
+ cel.BinaryBinding(func(list, delim ref.Val) ref.Val {
+ l, err := list.ConvertToNative(stringListType)
+ if err != nil {
+ return types.NewErr(err.Error())
+ }
+ d := delim.(types.String)
+ return stringOrError(joinSeparator(l.([]string), string(d)))
+ }))),
+ )
}
+ return opts
}
-func (stringLib) ProgramOptions() []cel.ProgramOption {
+// ProgramOptions implements the Library interface method.
+func (*stringLib) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
@@ -478,6 +642,452 @@ func join(strs []string) (string, error) {
return strings.Join(strs, ""), nil
}
+func joinValSeparator(strs traits.Lister, separator string) (string, error) {
+ sz := strs.Size().(types.Int)
+ var sb strings.Builder
+ for i := types.Int(0); i < sz; i++ {
+ if i != 0 {
+ sb.WriteString(separator)
+ }
+ elem := strs.Get(i)
+ str, ok := elem.(types.String)
+ if !ok {
+ return "", fmt.Errorf("join: invalid input: %v", elem)
+ }
+ sb.WriteString(string(str))
+ }
+ return sb.String(), nil
+}
+
+type clauseImpl func(ref.Val, string) (string, error)
+
+func clauseForType(argType ref.Type) (clauseImpl, error) {
+ switch argType {
+ case types.IntType, types.UintType:
+ return formatDecimal, nil
+ case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType:
+ return FormatString, nil
+ case types.TimestampType, types.DurationType:
+ // special case to ensure timestamps/durations get printed as CEL literals
+ return func(arg ref.Val, locale string) (string, error) {
+ argStrVal := arg.ConvertToType(types.StringType)
+ argStr := argStrVal.Value().(string)
+ if arg.Type() == types.TimestampType {
+ return fmt.Sprintf("timestamp(%q)", argStr), nil
+ }
+ if arg.Type() == types.DurationType {
+ return fmt.Sprintf("duration(%q)", argStr), nil
+ }
+ return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName())
+ }, nil
+ case types.ListType:
+ return formatList, nil
+ case types.MapType:
+ return formatMap, nil
+ case types.DoubleType:
+ // avoid formatFixed so we can output a period as the decimal separator in order
+ // to always be a valid CEL literal
+ return func(arg ref.Val, locale string) (string, error) {
+ argDouble, ok := arg.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName())
+ }
+ fmtStr := fmt.Sprintf("%%.%df", defaultPrecision)
+ return fmt.Sprintf(fmtStr, argDouble), nil
+ }, nil
+ case types.TypeType:
+ return func(arg ref.Val, locale string) (string, error) {
+ return fmt.Sprintf("type(%s)", arg.Value().(string)), nil
+ }, nil
+ default:
+ return nil, fmt.Errorf("no formatting function for %s", argType.TypeName())
+ }
+}
+
+func formatList(arg ref.Val, locale string) (string, error) {
+ argList := arg.(traits.Lister)
+ argIterator := argList.Iterator()
+ var listStrBuilder strings.Builder
+ _, err := listStrBuilder.WriteRune('[')
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ for argIterator.HasNext() == types.True {
+ member := argIterator.Next()
+ memberFormat, err := clauseForType(member.Type())
+ if err != nil {
+ return "", err
+ }
+ unquotedStr, err := memberFormat(member, locale)
+ if err != nil {
+ return "", err
+ }
+ str := quoteForCEL(member, unquotedStr)
+ _, err = listStrBuilder.WriteString(str)
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ if argIterator.HasNext() == types.True {
+ _, err = listStrBuilder.WriteString(", ")
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ }
+ }
+ _, err = listStrBuilder.WriteRune(']')
+ if err != nil {
+ return "", fmt.Errorf("error writing to list string: %w", err)
+ }
+ return listStrBuilder.String(), nil
+}
+
+func formatMap(arg ref.Val, locale string) (string, error) {
+ argMap := arg.(traits.Mapper)
+ argIterator := argMap.Iterator()
+ type mapPair struct {
+ key string
+ value string
+ }
+ argPairs := make([]mapPair, argMap.Size().Value().(int64))
+ i := 0
+ for argIterator.HasNext() == types.True {
+ key := argIterator.Next()
+ var keyFormat clauseImpl
+ switch key.Type() {
+ case types.StringType, types.BoolType:
+ keyFormat = FormatString
+ case types.IntType, types.UintType:
+ keyFormat = formatDecimal
+ default:
+ return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName())
+ }
+ unquotedKeyStr, err := keyFormat(key, locale)
+ if err != nil {
+ return "", err
+ }
+ keyStr := quoteForCEL(key, unquotedKeyStr)
+ value, found := argMap.Find(key)
+ if !found {
+ return "", fmt.Errorf("could not find key: %q", key)
+ }
+ valueFormat, err := clauseForType(value.Type())
+ if err != nil {
+ return "", err
+ }
+ unquotedValueStr, err := valueFormat(value, locale)
+ if err != nil {
+ return "", err
+ }
+ valueStr := quoteForCEL(value, unquotedValueStr)
+ argPairs[i] = mapPair{keyStr, valueStr}
+ i++
+ }
+ sort.SliceStable(argPairs, func(x, y int) bool {
+ return argPairs[x].key < argPairs[y].key
+ })
+ var mapStrBuilder strings.Builder
+ _, err := mapStrBuilder.WriteRune('{')
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ for i, entry := range argPairs {
+ _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value))
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ if i < len(argPairs)-1 {
+ _, err = mapStrBuilder.WriteString(", ")
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ }
+ }
+ _, err = mapStrBuilder.WriteRune('}')
+ if err != nil {
+ return "", fmt.Errorf("error writing to map string: %w", err)
+ }
+ return mapStrBuilder.String(), nil
+}
+
+// quoteForCEL takes a formatted, unquoted value and quotes it in a manner
+// suitable for embedding directly in CEL.
+func quoteForCEL(refVal ref.Val, unquotedValue string) string {
+ switch refVal.Type() {
+ case types.StringType:
+ return fmt.Sprintf("%q", unquotedValue)
+ case types.BytesType:
+ return fmt.Sprintf("b%q", unquotedValue)
+ case types.DoubleType:
+ // special case to handle infinity/NaN
+ num := refVal.Value().(float64)
+ if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) {
+ return fmt.Sprintf("%q", unquotedValue)
+ }
+ return unquotedValue
+ default:
+ return unquotedValue
+ }
+}
+
+// FormatString returns the string representation of a CEL value.
+// It is used to implement the %s specifier in the (string).format() extension
+// function.
+func FormatString(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.ListType:
+ return formatList(arg, locale)
+ case types.MapType:
+ return formatMap(arg, locale)
+ case types.IntType, types.UintType, types.DoubleType,
+ types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType:
+ argStrVal := arg.ConvertToType(types.StringType)
+ argStr, ok := argStrVal.Value().(string)
+ if !ok {
+ return "", fmt.Errorf("could not convert argument %q to string", argStrVal)
+ }
+ return argStr, nil
+ case types.NullType:
+ return "null", nil
+ default:
+ return "", fmt.Errorf("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", arg.Type().TypeName())
+ }
+}
+
+func formatDecimal(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt, ok := arg.ConvertToType(types.IntType).Value().(int64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value())
+ }
+ return fmt.Sprintf("%d", argInt), nil
+ case types.UintType:
+ argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value())
+ }
+ return fmt.Sprintf("%d", argInt), nil
+ default:
+ return "", fmt.Errorf("decimal clause can only be used on integers, was given %s", arg.Type().TypeName())
+ }
+}
+
+func matchLanguage(locale string) (language.Tag, error) {
+ matcher, err := makeMatcher(locale)
+ if err != nil {
+ return language.Und, err
+ }
+ tag, _ := language.MatchStrings(matcher, locale)
+ return tag, nil
+}
+
+func makeMatcher(locale string) (language.Matcher, error) {
+ tags := make([]language.Tag, 0)
+ tag, err := language.Parse(locale)
+ if err != nil {
+ return nil, err
+ }
+ tags = append(tags, tag)
+ return language.NewMatcher(tags), nil
+}
+
+// quote implements a string quoting function. The string will be wrapped in
+// double quotes, and all valid CEL escape sequences will be escaped to show up
+// literally if printed. If the input contains any invalid UTF-8, the invalid runes
+// will be replaced with utf8.RuneError.
+func quote(s string) (string, error) {
+ var quotedStrBuilder strings.Builder
+ for _, c := range sanitize(s) {
+ switch c {
+ case '\a':
+ quotedStrBuilder.WriteString("\\a")
+ case '\b':
+ quotedStrBuilder.WriteString("\\b")
+ case '\f':
+ quotedStrBuilder.WriteString("\\f")
+ case '\n':
+ quotedStrBuilder.WriteString("\\n")
+ case '\r':
+ quotedStrBuilder.WriteString("\\r")
+ case '\t':
+ quotedStrBuilder.WriteString("\\t")
+ case '\v':
+ quotedStrBuilder.WriteString("\\v")
+ case '\\':
+ quotedStrBuilder.WriteString("\\\\")
+ case '"':
+ quotedStrBuilder.WriteString("\\\"")
+ default:
+ quotedStrBuilder.WriteRune(c)
+ }
+ }
+ escapedStr := quotedStrBuilder.String()
+ return "\"" + escapedStr + "\"", nil
+}
+
+// sanitize replaces all invalid runes in the given string with utf8.RuneError.
+func sanitize(s string) string {
+ var sanitizedStringBuilder strings.Builder
+ for _, r := range s {
+ if !utf8.ValidRune(r) {
+ sanitizedStringBuilder.WriteRune(utf8.RuneError)
+ } else {
+ sanitizedStringBuilder.WriteRune(r)
+ }
+ }
+ return sanitizedStringBuilder.String()
+}
+
+type stringFormatter struct{}
+
+func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) {
+ return FormatString(arg, locale)
+}
+
+func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) {
+ return formatDecimal(arg, locale)
+}
+
+func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) {
+ if precision == nil {
+ precision = new(int)
+ *precision = defaultPrecision
+ }
+ return func(arg ref.Val, locale string) (string, error) {
+ strException := false
+ if arg.Type() == types.StringType {
+ argStr := arg.Value().(string)
+ if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" {
+ strException = true
+ }
+ }
+ if arg.Type() != types.DoubleType && !strException {
+ return "", fmt.Errorf("fixed-point clause can only be used on doubles, was given %s", arg.Type().TypeName())
+ }
+ argFloatVal := arg.ConvertToType(types.DoubleType)
+ argFloat, ok := argFloatVal.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value())
+ }
+ fmtStr := fmt.Sprintf("%%.%df", *precision)
+
+ matchedLocale, err := matchLanguage(locale)
+ if err != nil {
+ return "", fmt.Errorf("error matching locale: %w", err)
+ }
+ return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil
+ }
+}
+
+func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) {
+ if precision == nil {
+ precision = new(int)
+ *precision = defaultPrecision
+ }
+ return func(arg ref.Val, locale string) (string, error) {
+ strException := false
+ if arg.Type() == types.StringType {
+ argStr := arg.Value().(string)
+ if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" {
+ strException = true
+ }
+ }
+ if arg.Type() != types.DoubleType && !strException {
+ return "", fmt.Errorf("scientific clause can only be used on doubles, was given %s", arg.Type().TypeName())
+ }
+ argFloatVal := arg.ConvertToType(types.DoubleType)
+ argFloat, ok := argFloatVal.Value().(float64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value())
+ }
+ matchedLocale, err := matchLanguage(locale)
+ if err != nil {
+ return "", fmt.Errorf("error matching locale: %w", err)
+ }
+ fmtStr := fmt.Sprintf("%%%de", *precision)
+ return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil
+ }
+}
+
+func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt := arg.Value().(int64)
+ // locale is intentionally unused as integers formatted as binary
+ // strings are locale-independent
+ return fmt.Sprintf("%b", argInt), nil
+ case types.UintType:
+ argInt := arg.Value().(uint64)
+ return fmt.Sprintf("%b", argInt), nil
+ case types.BoolType:
+ argBool := arg.Value().(bool)
+ if argBool {
+ return "1", nil
+ }
+ return "0", nil
+ default:
+ return "", fmt.Errorf("only integers and bools can be formatted as binary, was given %s", arg.Type().TypeName())
+ }
+}
+
+func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ fmtStr := "%x"
+ if useUpper {
+ fmtStr = "%X"
+ }
+ switch arg.Type() {
+ case types.StringType, types.BytesType:
+ if arg.Type() == types.BytesType {
+ return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil
+ }
+ return fmt.Sprintf(fmtStr, arg.Value().(string)), nil
+ case types.IntType:
+ argInt, ok := arg.Value().(int64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value())
+ }
+ return fmt.Sprintf(fmtStr, argInt), nil
+ case types.UintType:
+ argInt, ok := arg.Value().(uint64)
+ if !ok {
+ return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value())
+ }
+ return fmt.Sprintf(fmtStr, argInt), nil
+ default:
+ return "", fmt.Errorf("only integers, byte buffers, and strings can be formatted as hex, was given %s", arg.Type().TypeName())
+ }
+ }
+}
+
+func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) {
+ switch arg.Type() {
+ case types.IntType:
+ argInt := arg.Value().(int64)
+ return fmt.Sprintf("%o", argInt), nil
+ case types.UintType:
+ argInt := arg.Value().(uint64)
+ return fmt.Sprintf("%o", argInt), nil
+ default:
+ return "", fmt.Errorf("octal clause can only be used on integers, was given %s", arg.Type().TypeName())
+ }
+}
+
+type stringArgList struct {
+ args traits.Lister
+}
+
+func (c *stringArgList) Arg(index int64) (ref.Val, error) {
+ if index >= c.args.Size().Value().(int64) {
+ return nil, fmt.Errorf("index %d out of range", index)
+ }
+ return c.args.Get(types.Int(index)), nil
+}
+
+func (c *stringArgList) ArgSize() int64 {
+ return c.args.Size().Value().(int64)
+}
+
var (
stringListType = reflect.TypeOf([]string{})
)
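A short sketch of enabling the versioned strings library above; the expression reuses documented behaviors of format and join, and version 2 selects the traits.Lister-based join overloads introduced in this file. Setup and variable names are illustrative assumptions.

    env, err := cel.NewEnv(ext.Strings(ext.StringsVersion(2)))
    if err != nil {
    	panic(err)
    }
    ast, iss := env.Compile(`"name: %s, size: %d".format(["data", 64]) == "name: data, size: 64"
    	&& ['a', 'b', 'c'].join('-') == 'a-b-c'`)
    if iss != nil && iss.Err() != nil {
    	panic(iss.Err())
    }
    prg, _ := env.Program(ast)
    out, _, _ := prg.Eval(cel.NoVars())
    fmt.Println(out) // true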
diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
index 04a3ec7441..b6d04e0003 100644
--- a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel
@@ -11,10 +11,10 @@ go_library(
"activation.go",
"attribute_patterns.go",
"attributes.go",
- "coster.go",
"decorators.go",
"dispatcher.go",
"evalstate.go",
+ "formatting.go",
"interpretable.go",
"interpreter.go",
"optimizations.go",
@@ -32,7 +32,7 @@ go_library(
"//common/types/ref:go_default_library",
"//common/types/traits:go_default_library",
"//interpreter/functions:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/durationpb:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
@@ -49,6 +49,7 @@ go_test(
"attributes_test.go",
"interpreter_test.go",
"prune_test.go",
+ "runtimecost_test.go",
],
embed = [
":go_default_library",
@@ -65,7 +66,7 @@ go_test(
"//test:go_default_library",
"//test/proto2pb:go_default_library",
"//test/proto3pb:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/anypb:go_default_library",
],
diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go
index 8686d4f04f..f82e4e9038 100644
--- a/vendor/github.com/google/cel-go/interpreter/activation.go
+++ b/vendor/github.com/google/cel-go/interpreter/activation.go
@@ -28,7 +28,7 @@ import (
type Activation interface {
// ResolveName returns a value from the activation by qualified name, or false if the name
// could not be found.
- ResolveName(name string) (interface{}, bool)
+ ResolveName(name string) (any, bool)
// Parent returns the parent of the current activation, may be nil.
// If non-nil, the parent will be searched during resolve calls.
@@ -43,23 +43,23 @@ func EmptyActivation() Activation {
// emptyActivation is a variable-free activation.
type emptyActivation struct{}
-func (emptyActivation) ResolveName(string) (interface{}, bool) { return nil, false }
-func (emptyActivation) Parent() Activation { return nil }
+func (emptyActivation) ResolveName(string) (any, bool) { return nil, false }
+func (emptyActivation) Parent() Activation { return nil }
// NewActivation returns an activation based on a map-based binding where the map keys are
// expected to be qualified names used with ResolveName calls.
//
-// The input `bindings` may either be of type `Activation` or `map[string]interface{}`.
+// The input `bindings` may either be of type `Activation` or `map[string]any`.
//
// Lazy bindings may be supplied within the map-based input in either of the following forms:
-// - func() interface{}
+// - func() any
// - func() ref.Val
//
// The output of the lazy binding will overwrite the variable reference in the internal map.
//
// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
// the ref.TypeAdapter configured in the environment.
-func NewActivation(bindings interface{}) (Activation, error) {
+func NewActivation(bindings any) (Activation, error) {
if bindings == nil {
return nil, errors.New("bindings must be non-nil")
}
@@ -67,7 +67,7 @@ func NewActivation(bindings interface{}) (Activation, error) {
if isActivation {
return a, nil
}
- m, isMap := bindings.(map[string]interface{})
+ m, isMap := bindings.(map[string]any)
if !isMap {
return nil, fmt.Errorf(
"activation input must be an activation or map[string]interface: got %T",
@@ -81,7 +81,7 @@ func NewActivation(bindings interface{}) (Activation, error) {
// Named bindings may lazily supply values by providing a function which accepts no arguments and
// produces an interface value.
type mapActivation struct {
- bindings map[string]interface{}
+ bindings map[string]any
}
// Parent implements the Activation interface method.
@@ -90,7 +90,7 @@ func (a *mapActivation) Parent() Activation {
}
// ResolveName implements the Activation interface method.
-func (a *mapActivation) ResolveName(name string) (interface{}, bool) {
+func (a *mapActivation) ResolveName(name string) (any, bool) {
obj, found := a.bindings[name]
if !found {
return nil, false
@@ -100,7 +100,7 @@ func (a *mapActivation) ResolveName(name string) (interface{}, bool) {
obj = fn()
a.bindings[name] = obj
}
- fnRaw, isLazy := obj.(func() interface{})
+ fnRaw, isLazy := obj.(func() any)
if isLazy {
obj = fnRaw()
a.bindings[name] = obj
@@ -121,7 +121,7 @@ func (a *hierarchicalActivation) Parent() Activation {
}
// ResolveName implements the Activation interface method.
-func (a *hierarchicalActivation) ResolveName(name string) (interface{}, bool) {
+func (a *hierarchicalActivation) ResolveName(name string) (any, bool) {
if object, found := a.child.ResolveName(name); found {
return object, found
}
@@ -138,8 +138,8 @@ func NewHierarchicalActivation(parent Activation, child Activation) Activation {
// representing field and index operations that should result in a 'types.Unknown' result.
//
// The `bindings` value may be any value type supported by the interpreter.NewActivation call,
-// but is typically either an existing Activation or map[string]interface{}.
-func NewPartialActivation(bindings interface{},
+// but is typically either an existing Activation or map[string]any.
+func NewPartialActivation(bindings any,
unknowns ...*AttributePattern) (PartialActivation, error) {
a, err := NewActivation(bindings)
if err != nil {
@@ -184,7 +184,7 @@ func (v *varActivation) Parent() Activation {
}
// ResolveName implements the Activation interface method.
-func (v *varActivation) ResolveName(name string) (interface{}, bool) {
+func (v *varActivation) ResolveName(name string) (any, bool) {
if name == v.name {
return v.val, true
}
@@ -194,7 +194,7 @@ func (v *varActivation) ResolveName(name string) (interface{}, bool) {
var (
// pool of var activations to reduce allocations during folds.
varActivationPool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &varActivation{}
},
}
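A small sketch of the map-based activation described above, including a lazy binding; the key names are arbitrary assumptions. Note that cel.Program.Eval builds the same kind of activation internally when handed a plain map.

    // A lazy binding is invoked on first resolution and memoized back into the map.
    vars, err := interpreter.NewActivation(map[string]any{
    	"request.user": "alice",
    	"request.time": func() any { return time.Now() },
    })
    if err != nil {
    	panic(err)
    }
    user, found := vars.ResolveName("request.user")
    fmt.Println(user, found) // alice true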
diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
index b33f7f7fd9..afb7c8d5bf 100644
--- a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
+++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
@@ -15,8 +15,6 @@
package interpreter
import (
- "fmt"
-
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
@@ -36,9 +34,9 @@ import (
//
// Examples:
//
-// 1. ns.myvar["complex-value"]
-// 2. ns.myvar["complex-value"][0]
-// 3. ns.myvar["complex-value"].*.name
+// 1. ns.myvar["complex-value"]
+// 2. ns.myvar["complex-value"][0]
+// 3. ns.myvar["complex-value"].*.name
//
// The first example is simple: match an attribute where the variable is 'ns.myvar' with a
// field access on 'complex-value'. The second example expands the match to indicate that only
@@ -108,7 +106,7 @@ func (apat *AttributePattern) QualifierPatterns() []*AttributeQualifierPattern {
// AttributeQualifierPattern holds a wildcard or valued qualifier pattern.
type AttributeQualifierPattern struct {
wildcard bool
- value interface{}
+ value any
}
// Matches returns true if the qualifier pattern is a wildcard, or the Qualifier implements the
@@ -134,44 +132,44 @@ func (qpat *AttributeQualifierPattern) Matches(q Qualifier) bool {
type qualifierValueEquator interface {
// QualifierValueEquals returns true if the input value is equal to the value held in the
// Qualifier.
- QualifierValueEquals(value interface{}) bool
+ QualifierValueEquals(value any) bool
}
// QualifierValueEquals implementation for boolean qualifiers.
-func (q *boolQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *boolQualifier) QualifierValueEquals(value any) bool {
bval, ok := value.(bool)
return ok && q.value == bval
}
// QualifierValueEquals implementation for field qualifiers.
-func (q *fieldQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *fieldQualifier) QualifierValueEquals(value any) bool {
sval, ok := value.(string)
return ok && q.Name == sval
}
// QualifierValueEquals implementation for string qualifiers.
-func (q *stringQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *stringQualifier) QualifierValueEquals(value any) bool {
sval, ok := value.(string)
return ok && q.value == sval
}
// QualifierValueEquals implementation for int qualifiers.
-func (q *intQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *intQualifier) QualifierValueEquals(value any) bool {
return numericValueEquals(value, q.celValue)
}
// QualifierValueEquals implementation for uint qualifiers.
-func (q *uintQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *uintQualifier) QualifierValueEquals(value any) bool {
return numericValueEquals(value, q.celValue)
}
// QualifierValueEquals implementation for double qualifiers.
-func (q *doubleQualifier) QualifierValueEquals(value interface{}) bool {
+func (q *doubleQualifier) QualifierValueEquals(value any) bool {
return numericValueEquals(value, q.celValue)
}
// numericValueEquals uses CEL equality to determine whether two number values are
-func numericValueEquals(value interface{}, celValue ref.Val) bool {
+func numericValueEquals(value any, celValue ref.Val) bool {
val := types.DefaultTypeAdapter.NativeToValue(value)
return celValue.Equal(val) == types.True
}
@@ -272,13 +270,9 @@ func (fac *partialAttributeFactory) matchesUnknownPatterns(
if err != nil {
return nil, err
}
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
// If this resolution behavior ever changes, new implementations of the
// qualifierValueEquator may be required to handle proper resolution.
- qual, err = fac.NewQualifier(nil, qual.ID(), val)
+ qual, err = fac.NewQualifier(nil, qual.ID(), val, attr.IsOptional())
if err != nil {
return nil, err
}
@@ -338,24 +332,10 @@ func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) {
return m, nil
}
-// Resolve is an implementation of the Attribute interface method which uses the
-// attributeMatcher TryResolve implementation rather than the embedded NamespacedAttribute
-// Resolve implementation.
-func (m *attributeMatcher) Resolve(vars Activation) (interface{}, error) {
- obj, found, err := m.TryResolve(vars)
- if err != nil {
- return nil, err
- }
- if !found {
- return nil, fmt.Errorf("no such attribute: %v", m.NamespacedAttribute)
- }
- return obj, nil
-}
-
-// TryResolve is an implementation of the NamespacedAttribute interface method which tests
+// Resolve is an implementation of the NamespacedAttribute interface method which tests
// for matching unknown attribute patterns and returns types.Unknown if present. Otherwise,
// the standard Resolve logic applies.
-func (m *attributeMatcher) TryResolve(vars Activation) (interface{}, bool, error) {
+func (m *attributeMatcher) Resolve(vars Activation) (any, error) {
id := m.NamespacedAttribute.ID()
// Bug in how partial activation is resolved, should search parents as well.
partial, isPartial := toPartialActivation(vars)
@@ -366,30 +346,23 @@ func (m *attributeMatcher) TryResolve(vars Activation) (interface{}, bool, error
m.CandidateVariableNames(),
m.qualifiers)
if err != nil {
- return nil, true, err
+ return nil, err
}
if unk != nil {
- return unk, true, nil
+ return unk, nil
}
}
- return m.NamespacedAttribute.TryResolve(vars)
+ return m.NamespacedAttribute.Resolve(vars)
}
// Qualify is an implementation of the Qualifier interface method.
-func (m *attributeMatcher) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := m.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := m.fac.NewQualifier(nil, m.ID(), val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (m *attributeMatcher) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(m.fac, vars, obj, m)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (m *attributeMatcher) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(m.fac, vars, obj, m, presenceOnly)
}
func toPartialActivation(vars Activation) (PartialActivation, bool) {
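For orientation only (not part of this patch), the pattern and partial-activation types touched above are typically exercised through the interpreter package's exported helpers. A hedged sketch, assuming the real import path github.com/google/cel-go/interpreter and the NewAttributePattern/NewPartialActivation helpers:

```go
package main

import (
	"github.com/google/cel-go/interpreter"
)

// markUnknown builds a PartialActivation in which ns.myvar["complex-value"]
// is declared unknown, so evaluation yields types.Unknown for that attribute
// instead of a missing-attribute error.
func markUnknown() (interpreter.PartialActivation, error) {
	pattern := interpreter.NewAttributePattern("ns.myvar").QualString("complex-value")
	return interpreter.NewPartialActivation(map[string]any{}, pattern)
}
```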
diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go
index 4f1772ea39..1b19dc2b57 100644
--- a/vendor/github.com/google/cel-go/interpreter/attributes.go
+++ b/vendor/github.com/google/cel-go/interpreter/attributes.go
@@ -16,7 +16,7 @@ package interpreter
import (
"fmt"
- "math"
+ "strings"
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/types"
@@ -61,7 +61,7 @@ type AttributeFactory interface {
// The qualifier may consider the object type being qualified, if present. If absent, the
// qualification should be considered dynamic and the qualification should still work, though
// it may be sub-optimal.
- NewQualifier(objType *exprpb.Type, qualID int64, val interface{}) (Qualifier, error)
+ NewQualifier(objType *exprpb.Type, qualID int64, val any, opt bool) (Qualifier, error)
}
// Qualifier marker interface for designating different qualifier values and where they appear
@@ -70,9 +70,21 @@ type Qualifier interface {
// ID where the qualifier appears within an expression.
ID() int64
+ // IsOptional specifies whether the qualifier is optional.
+ // Instead of a direct qualification, an optional qualifier will be resolved via QualifyIfPresent
+ // rather than Qualify. A non-optional qualifier may also be resolved through QualifyIfPresent if
+ // the object to qualify is itself optional.
+ IsOptional() bool
+
// Qualify performs a qualification, e.g. field selection, on the input object and returns
- // the value or error that results.
- Qualify(vars Activation, obj interface{}) (interface{}, error)
+ // the value of the access and whether the value was set. A non-nil value with a false presence
+ // test result indicates that the value being returned is the default value.
+ Qualify(vars Activation, obj any) (any, error)
+
+ // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+ // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+ // to whether the qualifier is present.
+ QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
}
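As a minimal sketch of the expanded contract, a hypothetical map-backed qualifier (not part of cel-go; assumes it sits alongside this package so the Activation type above is in scope, and that fmt is imported) could satisfy the new methods, with Qualify delegating to QualifyIfPresent:

```go
// mapQualifier is an illustration-only qualifier keyed by a string.
type mapQualifier struct {
	id       int64
	key      string
	optional bool
}

func (q *mapQualifier) ID() int64        { return q.id }
func (q *mapQualifier) IsOptional() bool { return q.optional }

// Qualify errors when the key is absent, mirroring non-optional access.
func (q *mapQualifier) Qualify(vars Activation, obj any) (any, error) {
	val, found, err := q.QualifyIfPresent(vars, obj, false)
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("no such key: %s", q.key)
	}
	return val, nil
}

// QualifyIfPresent reports presence without erroring on missing keys.
func (q *mapQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
	m, ok := obj.(map[string]any)
	if !ok {
		return nil, false, fmt.Errorf("unsupported target type %T", obj)
	}
	val, found := m[q.key]
	if !found || presenceOnly {
		return nil, found, nil
	}
	return val, true, nil
}
```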
// ConstantQualifier interface embeds the Qualifier interface and provides an option to inspect the
@@ -82,6 +94,7 @@ type Qualifier interface {
type ConstantQualifier interface {
Qualifier
+ // Value returns the constant value associated with the qualifier.
Value() ref.Val
}
@@ -90,12 +103,16 @@ type ConstantQualifier interface {
type Attribute interface {
Qualifier
- // AddQualifier adds a qualifier on the Attribute or error if the qualification is not a valid
- // qualifier type.
+ // AddQualifier adds a qualifier on the Attribute or error if the qualification is not a valid qualifier type.
AddQualifier(Qualifier) (Attribute, error)
- // Resolve returns the value of the Attribute given the current Activation.
- Resolve(Activation) (interface{}, error)
+ // Resolve returns the value of the Attribute and whether it was present given an Activation.
+ // For objects which support safe traversal, the value may be non-nil and the presence flag be false.
+ //
+ // If an error is encountered during attribute resolution, it will be returned immediately.
+ // If the attribute cannot be resolved within the Activation, the result must be: `nil`, `error`
+ // with the error indicating which variable was missing.
+ Resolve(Activation) (any, error)
}
// NamespacedAttribute values are a variable within a namespace, and an optional set of qualifiers
@@ -107,22 +124,14 @@ type NamespacedAttribute interface {
// the CEL namespace resolution order.
CandidateVariableNames() []string
- // Qualifiers returns the list of qualifiers associated with the Attribute.s
+ // Qualifiers returns the list of qualifiers associated with the Attribute.
Qualifiers() []Qualifier
-
- // TryResolve attempts to return the value of the attribute given the current Activation.
- // If an error is encountered during attribute resolution, it will be returned immediately.
- // If the attribute cannot be resolved within the Activation, the result must be: `nil`,
- // `false`, `nil`.
- TryResolve(Activation) (interface{}, bool, error)
}
// NewAttributeFactory returns a default AttributeFactory which produces Attribute values
// capable of resolving types by simple names and qualifying the values using the supported qualifier
// types: bool, int, string, and uint.
-func NewAttributeFactory(cont *containers.Container,
- a ref.TypeAdapter,
- p ref.TypeProvider) AttributeFactory {
+func NewAttributeFactory(cont *containers.Container, a ref.TypeAdapter, p ref.TypeProvider) AttributeFactory {
return &attrFactory{
container: cont,
adapter: a,
@@ -190,9 +199,7 @@ func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribu
}
// NewQualifier is an implementation of the AttributeFactory interface.
-func (r *attrFactory) NewQualifier(objType *exprpb.Type,
- qualID int64,
- val interface{}) (Qualifier, error) {
+func (r *attrFactory) NewQualifier(objType *exprpb.Type, qualID int64, val any, opt bool) (Qualifier, error) {
// Before creating a new qualifier check to see if this is a protobuf message field access.
// If so, use the precomputed GetFrom qualification method rather than the standard
// stringQualifier.
@@ -205,10 +212,11 @@ func (r *attrFactory) NewQualifier(objType *exprpb.Type,
Name: str,
FieldType: ft,
adapter: r.adapter,
+ optional: opt,
}, nil
}
}
- return newQualifier(r.adapter, qualID, val)
+ return newQualifier(r.adapter, qualID, val, opt)
}
type absoluteAttribute struct {
@@ -224,19 +232,18 @@ type absoluteAttribute struct {
// ID implements the Attribute interface method.
func (a *absoluteAttribute) ID() int64 {
- return a.id
+ qualCount := len(a.qualifiers)
+ if qualCount == 0 {
+ return a.id
+ }
+ return a.qualifiers[qualCount-1].ID()
}
-// Cost implements the Coster interface method.
-func (a *absoluteAttribute) Cost() (min, max int64) {
- for _, q := range a.qualifiers {
- minQ, maxQ := estimateCost(q)
- min += minQ
- max += maxQ
- }
- min++ // For object retrieval.
- max++
- return
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *absoluteAttribute) IsOptional() bool {
+ return false
}
// AddQualifier implements the Attribute interface method.
@@ -256,33 +263,13 @@ func (a *absoluteAttribute) Qualifiers() []Qualifier {
}
// Qualify is an implementation of the Qualifier interface method.
-func (a *absoluteAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (a *absoluteAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
}
-// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute
-// variable is not found, or if its Qualifiers cannot be applied successfully.
-func (a *absoluteAttribute) Resolve(vars Activation) (interface{}, error) {
- obj, found, err := a.TryResolve(vars)
- if err != nil {
- return nil, err
- }
- if found {
- return obj, nil
- }
- return nil, fmt.Errorf("no such attribute: %v", a)
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *absoluteAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
}
// String implements the Stringer interface method.
@@ -290,36 +277,47 @@ func (a *absoluteAttribute) String() string {
return fmt.Sprintf("id: %v, names: %v", a.id, a.namespaceNames)
}
-// TryResolve iterates through the namespaced variable names until one is found within the
-// Activation or TypeProvider.
+// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute
+// variable is not found, or if its Qualifiers cannot be applied successfully.
//
// If the variable name cannot be found as an Activation variable or in the TypeProvider as
-// a type, then the result is `nil`, `false`, `nil` per the interface requirement.
-func (a *absoluteAttribute) TryResolve(vars Activation) (interface{}, bool, error) {
+// a type, then the result is `nil`, `error` with the error indicating the name of the first
+// variable searched as missing.
+func (a *absoluteAttribute) Resolve(vars Activation) (any, error) {
for _, nm := range a.namespaceNames {
// If the variable is found, process it. Otherwise, wait until the checks to
// determine whether the type is unknown before returning.
- op, found := vars.ResolveName(nm)
+ obj, found := vars.ResolveName(nm)
if found {
- var err error
- for _, qual := range a.qualifiers {
- op, err = qual.Qualify(vars, op)
- if err != nil {
- return nil, true, err
+ obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if isOpt {
+ val := a.adapter.NativeToValue(obj)
+ if types.IsUnknown(val) {
+ return val, nil
}
+ return types.OptionalOf(val), nil
}
- return op, true, nil
+ return obj, nil
}
// Attempt to resolve the qualified type name if the name is not a variable identifier.
typ, found := a.provider.FindIdent(nm)
if found {
if len(a.qualifiers) == 0 {
- return typ, true, nil
+ return typ, nil
}
- return nil, true, fmt.Errorf("no such attribute: %v", typ)
}
}
- return nil, false, nil
+ var attrNames strings.Builder
+ for i, nm := range a.namespaceNames {
+ if i != 0 {
+ attrNames.WriteString(", ")
+ }
+ attrNames.WriteString(nm)
+ }
+ return nil, missingAttribute(attrNames.String())
}
type conditionalAttribute struct {
@@ -333,17 +331,19 @@ type conditionalAttribute struct {
// ID is an implementation of the Attribute interface method.
func (a *conditionalAttribute) ID() int64 {
+ // There's a field access after the conditional.
+ if a.truthy.ID() == a.falsy.ID() {
+ return a.truthy.ID()
+ }
+ // Otherwise return the conditional id as the consistent id being tracked.
return a.id
}
-// Cost provides the heuristic cost of a ternary operation ? : .
-// The cost is computed as cost(expr) plus the min/max costs of evaluating either
-// `t` or `f`.
-func (a *conditionalAttribute) Cost() (min, max int64) {
- tMin, tMax := estimateCost(a.truthy)
- fMin, fMax := estimateCost(a.falsy)
- eMin, eMax := estimateCost(a.expr)
- return eMin + findMin(tMin, fMin), eMax + findMax(tMax, fMax)
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *conditionalAttribute) IsOptional() bool {
+ return false
}
// AddQualifier appends the same qualifier to both sides of the conditional, in effect managing
@@ -361,28 +361,18 @@ func (a *conditionalAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
}
// Qualify is an implementation of the Qualifier interface method.
-func (a *conditionalAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (a *conditionalAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *conditionalAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
}
// Resolve evaluates the condition, and then resolves the truthy or falsy branch accordingly.
-func (a *conditionalAttribute) Resolve(vars Activation) (interface{}, error) {
+func (a *conditionalAttribute) Resolve(vars Activation) (any, error) {
val := a.expr.Eval(vars)
- if types.IsError(val) {
- return nil, val.(*types.Err)
- }
if val == types.True {
return a.truthy.Resolve(vars)
}
@@ -410,33 +400,14 @@ type maybeAttribute struct {
// ID is an implementation of the Attribute interface method.
func (a *maybeAttribute) ID() int64 {
- return a.id
+ return a.attrs[0].ID()
}
-// Cost implements the Coster interface method. The min cost is computed as the minimal cost among
-// all the possible attributes, the max cost ditto.
-func (a *maybeAttribute) Cost() (min, max int64) {
- min, max = math.MaxInt64, 0
- for _, a := range a.attrs {
- minA, maxA := estimateCost(a)
- min = findMin(min, minA)
- max = findMax(max, maxA)
- }
- return
-}
-
-func findMin(x, y int64) int64 {
- if x < y {
- return x
- }
- return y
-}
-
-func findMax(x, y int64) int64 {
- if x > y {
- return x
- }
- return y
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *maybeAttribute) IsOptional() bool {
+ return false
}
// AddQualifier adds a qualifier to each possible attribute variant, and also creates
@@ -446,21 +417,21 @@ func findMax(x, y int64) int64 {
//
// 1. Create a maybe attribute from a simple identifier when it occurs in a parsed-only expression
//
-// mb = MaybeAttribute(<id>, "a")
+// mb = MaybeAttribute(<id>, "a")
//
-// Initializing the maybe attribute creates an absolute attribute internally which includes the
-// possible namespaced names of the attribute. In this example, let's assume we are in namespace
-// 'ns', then the maybe is either one of the following variable names:
+// Initializing the maybe attribute creates an absolute attribute internally which includes the
+// possible namespaced names of the attribute. In this example, let's assume we are in namespace
+// 'ns', then the maybe is either one of the following variable names:
//
-// possible variables names -- ns.a, a
+// possible variables names -- ns.a, a
//
// 2. Adding a qualifier to the maybe means that the variable name could be a longer qualified
-// name, or a field selection on one of the possible variable names produced earlier:
+// name, or a field selection on one of the possible variable names produced earlier:
//
-// mb.AddQualifier("b")
+// mb.AddQualifier("b")
//
-// possible variables names -- ns.a.b, a.b
-// possible field selection -- ns.a['b'], a['b']
+// possible variables names -- ns.a.b, a.b
+// possible field selection -- ns.a['b'], a['b']
//
// If none of the attributes within the maybe resolves a value, the result is an error.
func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
@@ -486,43 +457,49 @@ func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
}
}
// Next, ensure the most specific variable / type reference is searched first.
- a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...)
+ if len(augmentedNames) != 0 {
+ a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...)
+ }
return a, nil
}
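The namespace expansion described in the comment above can be reproduced directly with the common/containers package. A short sketch (the container name 'ns' is assumed for illustration):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/containers"
)

func main() {
	// Candidate names resolve most-specific first within container 'ns'.
	cont, err := containers.NewContainer(containers.Name("ns"))
	if err != nil {
		panic(err)
	}
	fmt.Println(cont.ResolveCandidateNames("a"))   // [ns.a a]
	fmt.Println(cont.ResolveCandidateNames("a.b")) // [ns.a.b a.b]
}
```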
// Qualify is an implementation of the Qualifier interface method.
-func (a *maybeAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (a *maybeAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
}
// Resolve follows the variable resolution rules to determine whether the attribute is a variable
// or a field selection.
-func (a *maybeAttribute) Resolve(vars Activation) (interface{}, error) {
+func (a *maybeAttribute) Resolve(vars Activation) (any, error) {
+ var maybeErr error
for _, attr := range a.attrs {
- obj, found, err := attr.TryResolve(vars)
+ obj, err := attr.Resolve(vars)
// Return an error if one is encountered.
if err != nil {
- return nil, err
- }
- // If the object was found, return it.
- if found {
- return obj, nil
+ resErr, ok := err.(*resolutionError)
+ if !ok {
+ return nil, err
+ }
+ // If this was not a missing variable error, return it.
+ if !resErr.isMissingAttribute() {
+ return nil, err
+ }
+ // When the variable is missing in a maybe attribute we defer erroring.
+ if maybeErr == nil {
+ maybeErr = resErr
+ }
+ // Continue attempting to resolve possible variables.
+ continue
}
+ return obj, nil
}
// Else, produce a no such attribute error.
- return nil, fmt.Errorf("no such attribute: %v", a)
+ return nil, maybeErr
}
// String is an implementation of the Stringer interface method.
@@ -540,18 +517,18 @@ type relativeAttribute struct {
// ID is an implementation of the Attribute interface method.
func (a *relativeAttribute) ID() int64 {
- return a.id
+ qualCount := len(a.qualifiers)
+ if qualCount == 0 {
+ return a.id
+ }
+ return a.qualifiers[qualCount-1].ID()
}
-// Cost implements the Coster interface method.
-func (a *relativeAttribute) Cost() (min, max int64) {
- min, max = estimateCost(a.operand)
- for _, qual := range a.qualifiers {
- minQ, maxQ := estimateCost(qual)
- min += minQ
- max += maxQ
- }
- return
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *relativeAttribute) IsOptional() bool {
+ return false
}
// AddQualifier implements the Attribute interface method.
@@ -561,24 +538,17 @@ func (a *relativeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
}
// Qualify is an implementation of the Qualifier interface method.
-func (a *relativeAttribute) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- val, err := a.Resolve(vars)
- if err != nil {
- return nil, err
- }
- unk, isUnk := val.(types.Unknown)
- if isUnk {
- return unk, nil
- }
- qual, err := a.fac.NewQualifier(nil, a.id, val)
- if err != nil {
- return nil, err
- }
- return qual.Qualify(vars, obj)
+func (a *relativeAttribute) Qualify(vars Activation, obj any) (any, error) {
+ return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *relativeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
}
// Resolve expression value and qualifier relative to the expression result.
-func (a *relativeAttribute) Resolve(vars Activation) (interface{}, error) {
+func (a *relativeAttribute) Resolve(vars Activation) (any, error) {
// First, evaluate the operand.
v := a.operand.Eval(vars)
if types.IsError(v) {
@@ -587,14 +557,16 @@ func (a *relativeAttribute) Resolve(vars Activation) (interface{}, error) {
if types.IsUnknown(v) {
return v, nil
}
- // Next, qualify it. Qualification handles unknowns as well, so there's no need to recheck.
- var err error
- var obj interface{} = v
- for _, qual := range a.qualifiers {
- obj, err = qual.Qualify(vars, obj)
- if err != nil {
- return nil, err
+ obj, isOpt, err := applyQualifiers(vars, v, a.qualifiers)
+ if err != nil {
+ return nil, err
+ }
+ if isOpt {
+ val := a.adapter.NativeToValue(obj)
+ if types.IsUnknown(val) {
+ return val, nil
}
+ return types.OptionalOf(val), nil
}
return obj, nil
}
@@ -604,42 +576,93 @@ func (a *relativeAttribute) String() string {
return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand)
}
-func newQualifier(adapter ref.TypeAdapter, id int64, v interface{}) (Qualifier, error) {
+func newQualifier(adapter ref.TypeAdapter, id int64, v any, opt bool) (Qualifier, error) {
var qual Qualifier
switch val := v.(type) {
case Attribute:
- return &attrQualifier{id: id, Attribute: val}, nil
+ // Note, attributes are initially identified as non-optional since they represent a top-level
+ // field access; however, when used as a relative qualifier, e.g. a[?b.c], then an attrQualifier
+ // is created which intercepts the IsOptional check for the attribute in order to return the
+ // correct result.
+ return &attrQualifier{
+ id: id,
+ Attribute: val,
+ optional: opt,
+ }, nil
case string:
- qual = &stringQualifier{id: id, value: val, celValue: types.String(val), adapter: adapter}
+ qual = &stringQualifier{
+ id: id,
+ value: val,
+ celValue: types.String(val),
+ adapter: adapter,
+ optional: opt,
+ }
case int:
- qual = &intQualifier{id: id, value: int64(val), celValue: types.Int(val), adapter: adapter}
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
case int32:
- qual = &intQualifier{id: id, value: int64(val), celValue: types.Int(val), adapter: adapter}
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
case int64:
- qual = &intQualifier{id: id, value: val, celValue: types.Int(val), adapter: adapter}
+ qual = &intQualifier{
+ id: id, value: val, celValue: types.Int(val), adapter: adapter, optional: opt,
+ }
case uint:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter}
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
case uint32:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter}
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
case uint64:
- qual = &uintQualifier{id: id, value: val, celValue: types.Uint(val), adapter: adapter}
+ qual = &uintQualifier{
+ id: id, value: val, celValue: types.Uint(val), adapter: adapter, optional: opt,
+ }
case bool:
- qual = &boolQualifier{id: id, value: val, celValue: types.Bool(val), adapter: adapter}
+ qual = &boolQualifier{
+ id: id, value: val, celValue: types.Bool(val), adapter: adapter, optional: opt,
+ }
case float32:
- qual = &doubleQualifier{id: id, value: float64(val), celValue: types.Double(val), adapter: adapter}
+ qual = &doubleQualifier{
+ id: id,
+ value: float64(val),
+ celValue: types.Double(val),
+ adapter: adapter,
+ optional: opt,
+ }
case float64:
- qual = &doubleQualifier{id: id, value: val, celValue: types.Double(val), adapter: adapter}
+ qual = &doubleQualifier{
+ id: id, value: val, celValue: types.Double(val), adapter: adapter, optional: opt,
+ }
case types.String:
- qual = &stringQualifier{id: id, value: string(val), celValue: val, adapter: adapter}
+ qual = &stringQualifier{
+ id: id, value: string(val), celValue: val, adapter: adapter, optional: opt,
+ }
case types.Int:
- qual = &intQualifier{id: id, value: int64(val), celValue: val, adapter: adapter}
+ qual = &intQualifier{
+ id: id, value: int64(val), celValue: val, adapter: adapter, optional: opt,
+ }
case types.Uint:
- qual = &uintQualifier{id: id, value: uint64(val), celValue: val, adapter: adapter}
+ qual = &uintQualifier{
+ id: id, value: uint64(val), celValue: val, adapter: adapter, optional: opt,
+ }
case types.Bool:
- qual = &boolQualifier{id: id, value: bool(val), celValue: val, adapter: adapter}
+ qual = &boolQualifier{
+ id: id, value: bool(val), celValue: val, adapter: adapter, optional: opt,
+ }
case types.Double:
- qual = &doubleQualifier{id: id, value: float64(val), celValue: val, adapter: adapter}
+ qual = &doubleQualifier{
+ id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt,
+ }
+ case types.Unknown:
+ qual = &unknownQualifier{id: id, value: val}
default:
+ if q, ok := v.(Qualifier); ok {
+ return q, nil
+ }
return nil, fmt.Errorf("invalid qualifier type: %T", v)
}
return qual, nil
@@ -648,15 +671,18 @@ func newQualifier(adapter ref.TypeAdapter, id int64, v interface{}) (Qualifier,
type attrQualifier struct {
id int64
Attribute
+ optional bool
}
+// ID implements the Qualifier interface method and returns the qualification instruction id
+// rather than the attribute id.
func (q *attrQualifier) ID() int64 {
return q.id
}
-// Cost returns zero for constant field qualifiers
-func (q *attrQualifier) Cost() (min, max int64) {
- return estimateCost(q.Attribute)
+// IsOptional implements the Qualifier interface method.
+func (q *attrQualifier) IsOptional() bool {
+ return q.optional
}
type stringQualifier struct {
@@ -664,6 +690,7 @@ type stringQualifier struct {
value string
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -671,58 +698,87 @@ func (q *stringQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *stringQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *stringQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *stringQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *stringQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
s := q.value
- isMap := false
- isKey := false
switch o := obj.(type) {
- case map[string]interface{}:
- isMap = true
- obj, isKey = o[s]
+ case map[string]any:
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]string:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]int:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]int32:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]int64:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]uint:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]uint32:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]uint64:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]float32:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]float64:
- isMap = true
- obj, isKey = o[s]
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
+ }
case map[string]bool:
- isMap = true
- obj, isKey = o[s]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
+ obj, isKey := o[s]
+ if isKey {
+ return obj, true, nil
}
- return elem, nil
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
}
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", s)
+ if presenceTest {
+ return nil, false, nil
}
- return obj, nil
+ return nil, false, missingKey(q.celValue)
}
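For reference, an in-package, test-style sketch (using only what this diff defines plus types.DefaultTypeAdapter) showing how the two entry points now diverge on a missing map key:

```go
// Build a non-optional string qualifier for key "missing" and probe an empty map.
q, _ := newQualifier(types.DefaultTypeAdapter, 1, "missing", false)
sq := q.(*stringQualifier)

_, err := sq.Qualify(nil, map[string]string{})
// err renders as: no such key: missing

_, present, _ := sq.QualifyIfPresent(nil, map[string]string{}, true)
// present == false and no error is returned
```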
// Value implements the ConstantQualifier interface
@@ -730,16 +786,12 @@ func (q *stringQualifier) Value() ref.Val {
return q.celValue
}
-// Cost returns zero for constant field qualifiers
-func (q *stringQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
type intQualifier struct {
id int64
value int64
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -747,97 +799,113 @@ func (q *intQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *intQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *intQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *intQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *intQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
i := q.value
- isMap := false
- isKey := false
- isIndex := false
+ var isMap bool
switch o := obj.(type) {
// The specialized map types supported by an int qualifier are considerably fewer than the set
// of specialized map types supported by string qualifiers since they are less frequently used
// than string-based map keys. Additional specializations may be added in the future if
// desired.
- case map[int]interface{}:
+ case map[int]any:
isMap = true
- obj, isKey = o[int(i)]
- case map[int32]interface{}:
+ obj, isKey := o[int(i)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[int32]any:
isMap = true
- obj, isKey = o[int32(i)]
- case map[int64]interface{}:
+ obj, isKey := o[int32(i)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[int64]any:
isMap = true
- obj, isKey = o[i]
- case []interface{}:
- isIndex = i >= 0 && i < int64(len(o))
+ obj, isKey := o[i]
+ if isKey {
+ return obj, true, nil
+ }
+ case []any:
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []string:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []int:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []int32:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []int64:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []uint:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []uint32:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []uint64:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []float32:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []float64:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
case []bool:
- isIndex = i >= 0 && i < int64(len(o))
+ isIndex := i >= 0 && i < int64(len(o))
if isIndex {
- obj = o[i]
+ return o[i], true, nil
}
- case types.Unknown:
- return o, nil
default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
- }
- return elem, nil
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
}
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", i)
+ if presenceTest {
+ return nil, false, nil
}
- if !isMap && !isIndex {
- return nil, fmt.Errorf("index out of bounds: %v", i)
+ if isMap {
+ return nil, false, missingKey(q.celValue)
}
- return obj, nil
+ return nil, false, missingIndex(q.celValue)
}
// Value implements the ConstantQualifier interface
@@ -845,16 +913,12 @@ func (q *intQualifier) Value() ref.Val {
return q.celValue
}
-// Cost returns zero for constant field qualifiers
-func (q *intQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
type uintQualifier struct {
id int64
value uint64
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -862,38 +926,51 @@ func (q *uintQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *uintQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *uintQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *uintQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *uintQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
u := q.value
- isMap := false
- isKey := false
switch o := obj.(type) {
// The specialized map types supported by a uint qualifier are considerably fewer than the set
// of specialized map types supported by string qualifiers since they are less frequently used
// than string-based map keys. Additional specializations may be added in the future if
// desired.
- case map[uint]interface{}:
- isMap = true
- obj, isKey = o[uint(u)]
- case map[uint32]interface{}:
- isMap = true
- obj, isKey = o[uint32(u)]
- case map[uint64]interface{}:
- isMap = true
- obj, isKey = o[u]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
+ case map[uint]any:
+ obj, isKey := o[uint(u)]
+ if isKey {
+ return obj, true, nil
+ }
+ case map[uint32]any:
+ obj, isKey := o[uint32(u)]
+ if isKey {
+ return obj, true, nil
}
- return elem, nil
+ case map[uint64]any:
+ obj, isKey := o[u]
+ if isKey {
+ return obj, true, nil
+ }
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
}
- if isMap && !isKey {
- return nil, fmt.Errorf("no such key: %v", u)
+ if presenceTest {
+ return nil, false, nil
}
- return obj, nil
+ return nil, false, missingKey(q.celValue)
}
// Value implements the ConstantQualifier interface
@@ -901,16 +978,12 @@ func (q *uintQualifier) Value() ref.Val {
return q.celValue
}
-// Cost returns zero for constant field qualifiers
-func (q *uintQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
type boolQualifier struct {
id int64
value bool
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -918,30 +991,37 @@ func (q *boolQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *boolQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *boolQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *boolQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *boolQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
b := q.value
- isKey := false
switch o := obj.(type) {
- // The specialized map types supported by a bool qualifier are considerably fewer than the set
- // of specialized map types supported by string qualifiers since they are less frequently used
- // than string-based map keys. Additional specializations may be added in the future if
- // desired.
- case map[bool]interface{}:
- obj, isKey = o[b]
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
+ case map[bool]any:
+ obj, isKey := o[b]
+ if isKey {
+ return obj, true, nil
}
- return elem, nil
+ default:
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
}
- if !isKey {
- return nil, fmt.Errorf("no such key: %v", b)
+ if presenceTest {
+ return nil, false, nil
}
- return obj, nil
+ return nil, false, missingKey(q.celValue)
}
// Value implements the ConstantQualifier interface
@@ -949,11 +1029,6 @@ func (q *boolQualifier) Value() ref.Val {
return q.celValue
}
-// Cost returns zero for constant field qualifiers
-func (q *boolQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
// fieldQualifier indicates that the qualification is a well-defined field with a known
// field type. When the field type is known this can be used to improve the speed and
// efficiency of field resolution.
@@ -962,6 +1037,7 @@ type fieldQualifier struct {
Name string
FieldType *ref.FieldType
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -969,12 +1045,39 @@ func (q *fieldQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *fieldQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *fieldQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (q *fieldQualifier) Qualify(vars Activation, obj any) (any, error) {
if rv, ok := obj.(ref.Val); ok {
obj = rv.Value()
}
- return q.FieldType.GetFrom(obj)
+ val, err := q.FieldType.GetFrom(obj)
+ if err != nil {
+ return nil, err
+ }
+ return val, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *fieldQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ if rv, ok := obj.(ref.Val); ok {
+ obj = rv.Value()
+ }
+ if !q.FieldType.IsSet(obj) {
+ return nil, false, nil
+ }
+ if presenceOnly {
+ return nil, true, nil
+ }
+ val, err := q.FieldType.GetFrom(obj)
+ if err != nil {
+ return nil, false, err
+ }
+ return val, true, nil
}
// Value implements the ConstantQualifier interface
@@ -982,11 +1085,6 @@ func (q *fieldQualifier) Value() ref.Val {
return types.String(q.Name)
}
-// Cost returns zero for constant field qualifiers
-func (q *fieldQualifier) Cost() (min, max int64) {
- return 0, 0
-}
-
// doubleQualifier qualifies a CEL object, map, or list using a double value.
//
// This qualifier is used for working with dynamic data like JSON or protobuf.Any where the value
@@ -997,6 +1095,7 @@ type doubleQualifier struct {
value float64
celValue ref.Val
adapter ref.TypeAdapter
+ optional bool
}
// ID is an implementation of the Qualifier interface method.
@@ -1004,48 +1103,237 @@ func (q *doubleQualifier) ID() int64 {
return q.id
}
+// IsOptional implements the Qualifier interface method.
+func (q *doubleQualifier) IsOptional() bool {
+ return q.optional
+}
+
// Qualify implements the Qualifier interface method.
-func (q *doubleQualifier) Qualify(vars Activation, obj interface{}) (interface{}, error) {
- switch o := obj.(type) {
- case types.Unknown:
- return o, nil
- default:
- elem, err := refResolve(q.adapter, q.celValue, obj)
- if err != nil {
- return nil, err
+func (q *doubleQualifier) Qualify(vars Activation, obj any) (any, error) {
+ val, _, err := q.qualifyInternal(vars, obj, false, false)
+ return val, err
+}
+
+func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *doubleQualifier) Value() ref.Val {
+ return q.celValue
+}
+
+// unknownQualifier is a simple qualifier which always returns a preconfigured set of unknown values
+// for any value subject to qualification. This is consistent with CEL's unknown handling elsewhere.
+type unknownQualifier struct {
+ id int64
+ value types.Unknown
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) ID() int64 {
+ return q.id
+}
+
+// IsOptional returns trivially false as the unknown value is always returned.
+func (q *unknownQualifier) IsOptional() bool {
+ return false
+}
+
+// Qualify returns the unknown value associated with this qualifier.
+func (q *unknownQualifier) Qualify(vars Activation, obj any) (any, error) {
+ return q.value, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return q.value, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *unknownQualifier) Value() ref.Val {
+ return q.value
+}
+
+func applyQualifiers(vars Activation, obj any, qualifiers []Qualifier) (any, bool, error) {
+ optObj, isOpt := obj.(*types.Optional)
+ if isOpt {
+ if !optObj.HasValue() {
+ return optObj, false, nil
}
- return elem, nil
+ obj = optObj.GetValue().Value()
}
+
+ var err error
+ for _, qual := range qualifiers {
+ var qualObj any
+ isOpt = isOpt || qual.IsOptional()
+ if isOpt {
+ var present bool
+ qualObj, present, err = qual.QualifyIfPresent(vars, obj, false)
+ if err != nil {
+ return nil, false, err
+ }
+ if !present {
+ // We return optional none here with a presence of 'false' as the layers
+ // above will attempt to call types.OptionalOf() on a present value if any
+ // of the qualifiers is optional.
+ return types.OptionalNone, false, nil
+ }
+ } else {
+ qualObj, err = qual.Qualify(vars, obj)
+ if err != nil {
+ return nil, false, err
+ }
+ }
+ obj = qualObj
+ }
+ return obj, isOpt, nil
+}
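applyQualifiers is what ultimately services CEL's optional qualification syntax. A rough end-to-end sketch, assuming the top-level cel package from this same module with its OptionalTypes option enabled:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Optional index: an absent key becomes optional.none() instead of an error.
	env, err := cel.NewEnv(
		cel.OptionalTypes(),
		cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`m[?"missing"].orValue("fallback")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"m": map[string]string{}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // fallback
}
```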
+
+// attrQualify performs a qualification using the result of an attribute evaluation.
+func attrQualify(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute) (any, error) {
+ val, err := qualAttr.Resolve(vars)
+ if err != nil {
+ return nil, err
+ }
+ qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+ if err != nil {
+ return nil, err
+ }
+ return qual.Qualify(vars, obj)
}
-// refResolve attempts to convert the value to a CEL value and then uses reflection methods
-// to try and resolve the qualifier.
-func refResolve(adapter ref.TypeAdapter, idx ref.Val, obj interface{}) (ref.Val, error) {
+// attrQualifyIfPresent conditionally performs the qualification using the result of attribute
+// resolution, if that qualification is present on the target object.
+func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute,
+ presenceOnly bool) (any, bool, error) {
+ val, err := qualAttr.Resolve(vars)
+ if err != nil {
+ return nil, false, err
+ }
+ qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+ if err != nil {
+ return nil, false, err
+ }
+ return qual.QualifyIfPresent(vars, obj, presenceOnly)
+}
+
+// refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and
+// apply the qualifier with the option to presence test field accesses before retrieving field values.
+func refQualify(adapter ref.TypeAdapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) {
celVal := adapter.NativeToValue(obj)
- mapper, isMapper := celVal.(traits.Mapper)
- if isMapper {
- elem, found := mapper.Find(idx)
- if !found {
- return nil, fmt.Errorf("no such key: %v", idx)
+ switch v := celVal.(type) {
+ case types.Unknown:
+ return v, true, nil
+ case *types.Err:
+ return nil, false, v
+ case traits.Mapper:
+ val, found := v.Find(idx)
+ // If the index is of the wrong type for the map, then it is possible
+ // for the Find call to produce an error.
+ if types.IsError(val) {
+ return nil, false, val.(*types.Err)
}
- return elem, nil
- }
- indexer, isIndexer := celVal.(traits.Indexer)
- if isIndexer {
- elem := indexer.Get(idx)
- if types.IsError(elem) {
- return nil, elem.(*types.Err)
+ if found {
+ return val, true, nil
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingKey(idx)
+ case traits.Lister:
+ // If the index argument is not a valid numeric type, then it is possible
+ // for the index operation to produce an error.
+ i, err := types.IndexOrError(idx)
+ if err != nil {
+ return nil, false, err
+ }
+ celIndex := types.Int(i)
+ if i >= 0 && celIndex < v.Size().(types.Int) {
+ return v.Get(idx), true, nil
+ }
+ if presenceTest {
+ return nil, false, nil
+ }
+ return nil, false, missingIndex(idx)
+ case traits.Indexer:
+ if presenceTest {
+ ft, ok := v.(traits.FieldTester)
+ if ok {
+ presence := ft.IsSet(idx)
+ if types.IsError(presence) {
+ return nil, false, presence.(*types.Err)
+ }
+ // If not found or presence only test, then return.
+ // Otherwise, if found, obtain the value later on.
+ if presenceOnly || presence == types.False {
+ return nil, presence == types.True, nil
+ }
+ }
+ }
+ val := v.Get(idx)
+ if types.IsError(val) {
+ return nil, false, val.(*types.Err)
+ }
+ return val, true, nil
+ default:
+ if presenceTest {
+ return nil, false, nil
}
- return elem, nil
+ return nil, false, missingKey(idx)
}
- if types.IsUnknown(celVal) {
- return celVal, nil
+}
+
+// resolutionError is a custom error type which encodes the different error states which may
+// occur during attribute resolution.
+type resolutionError struct {
+ missingAttribute string
+ missingIndex ref.Val
+ missingKey ref.Val
+}
+
+func (e *resolutionError) isMissingAttribute() bool {
+ return e.missingAttribute != ""
+}
+
+func missingIndex(missing ref.Val) *resolutionError {
+ return &resolutionError{
+ missingIndex: missing,
+ }
+}
+
+func missingKey(missing ref.Val) *resolutionError {
+ return &resolutionError{
+ missingKey: missing,
}
- // TODO: If the types.Err value contains more than just an error message at some point in the
- // future, then it would be reasonable to return error values as ref.Val types rather than
- // simple go error types.
- if types.IsError(celVal) {
- return nil, celVal.(*types.Err)
+}
+
+func missingAttribute(attr string) *resolutionError {
+ return &resolutionError{
+ missingAttribute: attr,
+ }
+}
+
+// Error implements the error interface method.
+func (e *resolutionError) Error() string {
+ if e.missingKey != nil {
+ return fmt.Sprintf("no such key: %v", e.missingKey)
}
- return nil, fmt.Errorf("no such key: %v", idx)
+ if e.missingIndex != nil {
+ return fmt.Sprintf("index out of bounds: %v", e.missingIndex)
+ }
+ if e.missingAttribute != "" {
+ return fmt.Sprintf("no such attribute(s): %s", e.missingAttribute)
+ }
+ return "invalid attribute"
+}
+
+// Is implements the errors.Is() method used by more recent versions of Go.
+func (e *resolutionError) Is(err error) bool {
+ return err.Error() == e.Error()
}
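A small in-package illustration of the new error type's behavior (the helpers are unexported, so this assumes it runs alongside the package with fmt, errors, and types imported): the rendered messages match the fmt.Errorf strings they replace, and Is() compares by message.

```go
// Rendered messages for the three resolution failure modes.
fmt.Println(missingKey(types.String("name"))) // no such key: name
fmt.Println(missingIndex(types.Int(10)))      // index out of bounds: 10
fmt.Println(missingAttribute("ns.a, a"))      // no such attribute(s): ns.a, a

// Is() compares purely on the rendered message, so errors.Is matches any
// error whose text is identical.
err := missingAttribute("ns.a, a")
fmt.Println(errors.Is(err, errors.New("no such attribute(s): ns.a, a"))) // true
```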
diff --git a/vendor/github.com/google/cel-go/interpreter/decorators.go b/vendor/github.com/google/cel-go/interpreter/decorators.go
index bdbbad43e2..208487b7d3 100644
--- a/vendor/github.com/google/cel-go/interpreter/decorators.go
+++ b/vendor/github.com/google/cel-go/interpreter/decorators.go
@@ -29,7 +29,7 @@ type InterpretableDecorator func(Interpretable) (Interpretable, error)
func decObserveEval(observer EvalObserver) InterpretableDecorator {
return func(i Interpretable) (Interpretable, error) {
switch inst := i.(type) {
- case *evalWatch, *evalWatchAttr, *evalWatchConst:
+ case *evalWatch, *evalWatchAttr, *evalWatchConst, *evalWatchConstructor:
// these instructions are already watching, return straight-away.
return i, nil
case InterpretableAttribute:
@@ -42,6 +42,11 @@ func decObserveEval(observer EvalObserver) InterpretableDecorator {
InterpretableConst: inst,
observer: observer,
}, nil
+ case InterpretableConstructor:
+ return &evalWatchConstructor{
+ constructor: inst,
+ observer: observer,
+ }, nil
default:
return &evalWatch{
Interpretable: i,
@@ -224,8 +229,8 @@ func maybeOptimizeSetMembership(i Interpretable, inlist InterpretableCall) (Inte
valueSet := make(map[ref.Val]ref.Val)
for it.HasNext() == types.True {
elem := it.Next()
- if !types.IsPrimitiveType(elem) {
- // Note, non-primitive type are not yet supported.
+ if !types.IsPrimitiveType(elem) || elem.Type() == types.BytesType {
+ // Note, non-primitive types are not yet supported, and []byte isn't hashable.
return i, nil
}
valueSet[elem] = types.True
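The []byte carve-out above exists because byte strings are backed by Go slices, which cannot be hashed as map keys. A tiny stand-alone illustration in plain Go (outside cel-go):

```go
package main

func main() {
	set := map[any]bool{}
	set["ok"] = true           // comparable key: fine
	set[[]byte("oops")] = true // compiles, but panics: hash of unhashable type []uint8
}
```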
diff --git a/vendor/github.com/google/cel-go/interpreter/formatting.go b/vendor/github.com/google/cel-go/interpreter/formatting.go
new file mode 100644
index 0000000000..6a98f6fa56
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/formatting.go
@@ -0,0 +1,383 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+type typeVerifier func(int64, ...*types.TypeValue) (bool, error)
+
+// InterpolateFormattedString checks the syntax and cardinality of any string.format calls present in the expression and reports
+// any errors at compile time.
+func InterpolateFormattedString(verifier typeVerifier) InterpretableDecorator {
+ return func(inter Interpretable) (Interpretable, error) {
+ call, ok := inter.(InterpretableCall)
+ if !ok {
+ return inter, nil
+ }
+ if call.OverloadID() != "string_format" {
+ return inter, nil
+ }
+ args := call.Args()
+ if len(args) != 2 {
+ return nil, fmt.Errorf("wrong number of arguments to string.format (expected 2, got %d)", len(args))
+ }
+ fmtStrInter, ok := args[0].(InterpretableConst)
+ if !ok {
+ return inter, nil
+ }
+ var fmtArgsInter InterpretableConstructor
+ fmtArgsInter, ok = args[1].(InterpretableConstructor)
+ if !ok {
+ return inter, nil
+ }
+ if fmtArgsInter.Type() != types.ListType {
+ // don't necessarily return an error since the list may be DynType
+ return inter, nil
+ }
+ formatStr := fmtStrInter.Value().Value().(string)
+ initVals := fmtArgsInter.InitVals()
+
+ formatCheck := &formatCheck{
+ args: initVals,
+ verifier: verifier,
+ }
+ // use a placeholder locale, since locale doesn't affect syntax
+ _, err := ParseFormatString(formatStr, formatCheck, formatCheck, "en_US")
+ if err != nil {
+ return nil, err
+ }
+ seenArgs := formatCheck.argsRequested
+ if len(initVals) > seenArgs {
+ return nil, fmt.Errorf("too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(initVals))
+ }
+ return inter, nil
+ }
+}
+
+type formatCheck struct {
+ args []Interpretable
+ argsRequested int
+ curArgIndex int64
+ enableCheckArgTypes bool
+ verifier typeVerifier
+}
+
+func (c *formatCheck) String(arg ref.Val, locale string) (string, error) {
+ valid, err := verifyString(c.args[c.curArgIndex], c.verifier)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Decimal(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("integer clause can only be used on integers")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Fixed(precision *int) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values
+ valid, err := c.verifier(id, types.DoubleType, types.StringType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("fixed-point clause can only be used on doubles")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Scientific(precision *int) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.DoubleType, types.StringType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("scientific clause can only be used on doubles")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Binary(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType, types.BoolType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("only integers and bools can be formatted as binary")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Hex(useUpper bool) func(ref.Val, string) (string, error) {
+ return func(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType, types.StringType, types.BytesType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("only integers, byte buffers, and strings can be formatted as hex")
+ }
+ return "", nil
+ }
+}
+
+func (c *formatCheck) Octal(arg ref.Val, locale string) (string, error) {
+ id := c.args[c.curArgIndex].ID()
+ valid, err := c.verifier(id, types.IntType, types.UintType)
+ if err != nil {
+ return "", err
+ }
+ if !valid {
+ return "", errors.New("octal clause can only be used on integers")
+ }
+ return "", nil
+}
+
+func (c *formatCheck) Arg(index int64) (ref.Val, error) {
+ c.argsRequested++
+ c.curArgIndex = index
+	// return a dummy value - this is immediately passed back to us
+	// through one of the FormatStringInterpolator functions, so anything will do
+ return types.Int(0), nil
+}
+
+func (c *formatCheck) ArgSize() int64 {
+ return int64(len(c.args))
+}
+
+func verifyString(sub Interpretable, verifier typeVerifier) (bool, error) {
+ subVerified, err := verifier(sub.ID(),
+ types.ListType, types.MapType, types.IntType, types.UintType, types.DoubleType,
+ types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType, types.NullType)
+ if err != nil {
+ return false, err
+ }
+ if !subVerified {
+ return false, nil
+ }
+ con, ok := sub.(InterpretableConstructor)
+ if ok {
+ members := con.InitVals()
+ for _, m := range members {
+ // recursively verify if we're dealing with a list/map
+ verified, err := verifyString(m, verifier)
+ if err != nil {
+ return false, err
+ }
+ if !verified {
+ return false, nil
+ }
+ }
+ }
+	return true, nil
+}
+
+// FormatStringInterpolator is an interface that allows user-defined behavior
+// for formatting clause implementations, as well as argument retrieval.
+// Each function is expected to support the appropriate types as laid out in
+// the string.format documentation, and to return an error if given an inappropriate type.
+type FormatStringInterpolator interface {
+ // String takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a string, or an error if one occurred.
+ String(ref.Val, string) (string, error)
+
+ // Decimal takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a decimal integer, or an error if one occurred.
+ Decimal(ref.Val, string) (string, error)
+
+ // Fixed takes an int pointer representing precision (or nil if none was given) and
+ // returns a function operating in a similar manner to String and Decimal, taking a
+ // ref.Val and locale and returning the appropriate string. A closure is returned
+ // so precision can be set without needing an additional function call/configuration.
+ Fixed(*int) func(ref.Val, string) (string, error)
+
+ // Scientific functions identically to Fixed, except the string returned from the closure
+ // is expected to be in scientific notation.
+ Scientific(*int) func(ref.Val, string) (string, error)
+
+ // Binary takes a ref.Val and a string representing the current locale identifier
+ // and returns the Val formatted as a binary integer, or an error if one occurred.
+ Binary(ref.Val, string) (string, error)
+
+ // Hex takes a boolean that, if true, indicates the hex string output by the returned
+ // closure should use uppercase letters for A-F.
+ Hex(bool) func(ref.Val, string) (string, error)
+
+ // Octal takes a ref.Val and a string representing the current locale identifier and
+ // returns the Val formatted in octal, or an error if one occurred.
+ Octal(ref.Val, string) (string, error)
+}
+
+// FormatList is an interface that allows user-defined list-like datatypes to be used
+// for formatting clause implementations.
+type FormatList interface {
+ // Arg returns the ref.Val at the given index, or an error if one occurred.
+ Arg(int64) (ref.Val, error)
+ // ArgSize returns the length of the argument list.
+ ArgSize() int64
+}
+
+type clauseImpl func(ref.Val, string) (string, error)
+
+// ParseFormatString formats a string according to the string.format syntax, taking the clause implementations
+// from the provided FormatStringInterpolator and the args from the given FormatList.
+func ParseFormatString(formatStr string, callback FormatStringInterpolator, list FormatList, locale string) (string, error) {
+ i := 0
+ argIndex := 0
+ var builtStr strings.Builder
+ for i < len(formatStr) {
+ if formatStr[i] == '%' {
+ if i+1 < len(formatStr) && formatStr[i+1] == '%' {
+ err := builtStr.WriteByte('%')
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i += 2
+ continue
+ } else {
+ argAny, err := list.Arg(int64(argIndex))
+ if err != nil {
+ return "", err
+ }
+ if i+1 >= len(formatStr) {
+ return "", errors.New("unexpected end of string")
+ }
+ if int64(argIndex) >= list.ArgSize() {
+ return "", fmt.Errorf("index %d out of range", argIndex)
+ }
+ numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale)
+ if refErr != nil {
+ return "", refErr
+ }
+ _, err = builtStr.WriteString(val)
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i += numRead
+ argIndex++
+ }
+ } else {
+ err := builtStr.WriteByte(formatStr[i])
+ if err != nil {
+ return "", fmt.Errorf("error writing format string: %w", err)
+ }
+ i++
+ }
+ }
+ return builtStr.String(), nil
+}
+
+// parseAndFormatClause parses the format clause at the start of the given string with val, and returns
+// how many characters were consumed and the substituted string form of val, or an error if one occurred.
+func parseAndFormatClause(formatStr string, val ref.Val, callback FormatStringInterpolator, list FormatList, locale string) (int, string, error) {
+ i := 1
+ read, formatter, err := parseFormattingClause(formatStr[i:], callback)
+ i += read
+ if err != nil {
+ return -1, "", fmt.Errorf("could not parse formatting clause: %s", err)
+ }
+
+ valStr, err := formatter(val, locale)
+ if err != nil {
+ return -1, "", fmt.Errorf("error during formatting: %s", err)
+ }
+ return i, valStr, nil
+}
+
+func parseFormattingClause(formatStr string, callback FormatStringInterpolator) (int, clauseImpl, error) {
+ i := 0
+ read, precision, err := parsePrecision(formatStr[i:])
+ i += read
+ if err != nil {
+ return -1, nil, fmt.Errorf("error while parsing precision: %w", err)
+ }
+ r := rune(formatStr[i])
+ i++
+ switch r {
+ case 's':
+ return i, callback.String, nil
+ case 'd':
+ return i, callback.Decimal, nil
+ case 'f':
+ return i, callback.Fixed(precision), nil
+ case 'e':
+ return i, callback.Scientific(precision), nil
+ case 'b':
+ return i, callback.Binary, nil
+ case 'x', 'X':
+ return i, callback.Hex(unicode.IsUpper(r)), nil
+ case 'o':
+ return i, callback.Octal, nil
+ default:
+ return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r)
+ }
+}
+
+func parsePrecision(formatStr string) (int, *int, error) {
+ i := 0
+ if formatStr[i] != '.' {
+ return i, nil, nil
+ }
+ i++
+ var buffer strings.Builder
+ for {
+ if i >= len(formatStr) {
+ return -1, nil, errors.New("could not find end of precision specifier")
+ }
+ if !isASCIIDigit(rune(formatStr[i])) {
+ break
+ }
+ buffer.WriteByte(formatStr[i])
+ i++
+ }
+ precision, err := strconv.Atoi(buffer.String())
+ if err != nil {
+ return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err)
+ }
+ return i, &precision, nil
+}
+
+func isASCIIDigit(r rune) bool {
+ return r <= unicode.MaxASCII && unicode.IsDigit(r)
+}
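
As a rough illustration of how the exported pieces above fit together, the sketch below backs FormatList with a plain []ref.Val slice and supplies a pass-through FormatStringInterpolator to ParseFormatString; the import paths, the sliceArgs/stringer types, and the main wrapper are assumptions for illustration only, and a real interpolator would enforce the per-clause type rules that formatCheck verifies.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
)

// sliceArgs adapts a plain []ref.Val to the FormatList interface above.
type sliceArgs []ref.Val

func (s sliceArgs) Arg(i int64) (ref.Val, error) {
	if i < 0 || i >= int64(len(s)) {
		return nil, fmt.Errorf("index %d out of range", i)
	}
	return s[i], nil
}

func (s sliceArgs) ArgSize() int64 { return int64(len(s)) }

// stringer is a deliberately permissive FormatStringInterpolator that renders
// every clause with fmt; it exists only to show the wiring.
type stringer struct{}

func (stringer) String(v ref.Val, _ string) (string, error)  { return fmt.Sprintf("%v", v.Value()), nil }
func (stringer) Decimal(v ref.Val, _ string) (string, error) { return fmt.Sprintf("%d", v.Value()), nil }
func (stringer) Binary(v ref.Val, _ string) (string, error)  { return fmt.Sprintf("%b", v.Value()), nil }
func (stringer) Octal(v ref.Val, _ string) (string, error)   { return fmt.Sprintf("%o", v.Value()), nil }
func (stringer) Fixed(_ *int) func(ref.Val, string) (string, error) {
	return func(v ref.Val, _ string) (string, error) { return fmt.Sprintf("%v", v.Value()), nil }
}
func (stringer) Scientific(_ *int) func(ref.Val, string) (string, error) {
	return func(v ref.Val, _ string) (string, error) { return fmt.Sprintf("%v", v.Value()), nil }
}
func (stringer) Hex(_ bool) func(ref.Val, string) (string, error) {
	return func(v ref.Val, _ string) (string, error) { return fmt.Sprintf("%x", v.Value()), nil }
}

func main() {
	out, err := interpreter.ParseFormatString(
		"%s scheduled %d resize operations", // format string with two clauses
		stringer{},                          // clause implementations
		sliceArgs{types.String("driver"), types.Int(3)}, // argument list
		"en_US", // locale; only syntax handling is exercised here
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // driver scheduled 3 resize operations
}
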
diff --git a/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/vendor/github.com/google/cel-go/interpreter/functions/functions.go
index dd1e9ddd5f..9816017522 100644
--- a/vendor/github.com/google/cel-go/interpreter/functions/functions.go
+++ b/vendor/github.com/google/cel-go/interpreter/functions/functions.go
@@ -58,5 +58,5 @@ type UnaryOp func(value ref.Val) ref.Val
type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
// FunctionOp is a function with accepts zero or more arguments and produces
-// an value (as interface{}) or error as a result.
+// a value or error as a result.
type FunctionOp func(values ...ref.Val) ref.Val
diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go
index 4fdd12028b..32e2bcb7de 100644
--- a/vendor/github.com/google/cel-go/interpreter/interpretable.go
+++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go
@@ -15,7 +15,7 @@
package interpreter
import (
- "math"
+ "fmt"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/overloads"
@@ -64,10 +64,18 @@ type InterpretableAttribute interface {
// Qualify replicates the Attribute.Qualify method to permit extension and interception
// of object qualification.
- Qualify(vars Activation, obj interface{}) (interface{}, error)
+ Qualify(vars Activation, obj any) (any, error)
+
+ // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+ // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+ // to whether the qualifier is present.
+ QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
+
+ // IsOptional indicates whether the resulting value is an optional type.
+ IsOptional() bool
// Resolve returns the value of the Attribute given the current Activation.
- Resolve(Activation) (interface{}, error)
+ Resolve(Activation) (any, error)
}
// InterpretableCall interface for inspecting Interpretable instructions related to function calls.
@@ -103,10 +111,8 @@ type InterpretableConstructor interface {
// Core Interpretable implementations used during the program planning phase.
type evalTestOnly struct {
- id int64
- op Interpretable
- field types.String
- fieldType *ref.FieldType
+ id int64
+ InterpretableAttribute
}
// ID implements the Interpretable interface method.
@@ -116,44 +122,55 @@ func (test *evalTestOnly) ID() int64 {
// Eval implements the Interpretable interface method.
func (test *evalTestOnly) Eval(ctx Activation) ref.Val {
- // Handle field selection on a proto in the most efficient way possible.
- if test.fieldType != nil {
- opAttr, ok := test.op.(InterpretableAttribute)
- if ok {
- opVal, err := opAttr.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
- refVal, ok := opVal.(ref.Val)
- if ok {
- opVal = refVal.Value()
- }
- if test.fieldType.IsSet(opVal) {
- return types.True
- }
- return types.False
- }
+ val, err := test.Resolve(ctx)
+ // Return an error if the resolve step fails
+ if err != nil {
+ return types.WrapErr(err)
}
+ if optVal, isOpt := val.(*types.Optional); isOpt {
+ return types.Bool(optVal.HasValue())
+ }
+ return test.Adapter().NativeToValue(val)
+}
+
+// AddQualifier appends a qualifier that will always and only perform a presence test.
+func (test *evalTestOnly) AddQualifier(q Qualifier) (Attribute, error) {
+ cq, ok := q.(ConstantQualifier)
+ if !ok {
+ return nil, fmt.Errorf("test only expressions must have constant qualifiers: %v", q)
+ }
+ return test.InterpretableAttribute.AddQualifier(&testOnlyQualifier{ConstantQualifier: cq})
+}
+
+type testOnlyQualifier struct {
+ ConstantQualifier
+}
- obj := test.op.Eval(ctx)
- tester, ok := obj.(traits.FieldTester)
- if ok {
- return tester.IsSet(test.field)
+// Qualify determines whether the test-only qualifier is present on the input object.
+func (q *testOnlyQualifier) Qualify(vars Activation, obj any) (any, error) {
+ out, present, err := q.ConstantQualifier.QualifyIfPresent(vars, obj, true)
+ if err != nil {
+ return nil, err
+ }
+ if unk, isUnk := out.(types.Unknown); isUnk {
+ return unk, nil
}
- container, ok := obj.(traits.Container)
- if ok {
- return container.Contains(test.field)
+ if opt, isOpt := out.(types.Optional); isOpt {
+ return opt.HasValue(), nil
}
- return types.ValOrErr(obj, "invalid type for field selection.")
+ return present, nil
}
-// Cost provides the heuristic cost of a `has(field)` macro. The cost has at least 1 for determining
-// if the field exists, apart from the cost of accessing the field.
-func (test *evalTestOnly) Cost() (min, max int64) {
- min, max = estimateCost(test.op)
- min++
- max++
- return
+// QualifyIfPresent returns whether the target field in the test-only expression is present.
+func (q *testOnlyQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ // Only ever test for presence.
+ return q.ConstantQualifier.QualifyIfPresent(vars, obj, true)
+}
+
+// QualifierValueEquals determines whether the test-only constant qualifier equals the input value.
+func (q *testOnlyQualifier) QualifierValueEquals(value any) bool {
+ // The input qualifier will always be of type string
+ return q.ConstantQualifier.Value().Value() == value
}
// NewConstValue creates a new constant valued Interpretable.
@@ -179,11 +196,6 @@ func (cons *evalConst) Eval(ctx Activation) ref.Val {
return cons.val
}
-// Cost returns zero for a constant valued Interpretable.
-func (cons *evalConst) Cost() (min, max int64) {
- return 0, 0
-}
-
// Value implements the InterpretableConst interface method.
func (cons *evalConst) Value() ref.Val {
return cons.val
@@ -233,12 +245,6 @@ func (or *evalOr) Eval(ctx Activation) ref.Val {
return types.ValOrErr(rVal, "no such overload")
}
-// Cost implements the Coster interface method. The minimum possible cost incurs when the left-hand
-// side expr is sufficient in determining the evaluation result.
-func (or *evalOr) Cost() (min, max int64) {
- return calShortCircuitBinaryOpsCost(or.lhs, or.rhs)
-}
-
type evalAnd struct {
id int64
lhs Interpretable
@@ -283,18 +289,6 @@ func (and *evalAnd) Eval(ctx Activation) ref.Val {
return types.ValOrErr(rVal, "no such overload")
}
-// Cost implements the Coster interface method. The minimum possible cost incurs when the left-hand
-// side expr is sufficient in determining the evaluation result.
-func (and *evalAnd) Cost() (min, max int64) {
- return calShortCircuitBinaryOpsCost(and.lhs, and.rhs)
-}
-
-func calShortCircuitBinaryOpsCost(lhs, rhs Interpretable) (min, max int64) {
- lMin, lMax := estimateCost(lhs)
- _, rMax := estimateCost(rhs)
- return lMin, lMax + rMax + 1
-}
-
type evalEq struct {
id int64
lhs Interpretable
@@ -319,11 +313,6 @@ func (eq *evalEq) Eval(ctx Activation) ref.Val {
return types.Equal(lVal, rVal)
}
-// Cost implements the Coster interface method.
-func (eq *evalEq) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(eq.lhs, eq.rhs)
-}
-
// Function implements the InterpretableCall interface method.
func (*evalEq) Function() string {
return operators.Equals
@@ -363,11 +352,6 @@ func (ne *evalNe) Eval(ctx Activation) ref.Val {
return types.Bool(types.Equal(lVal, rVal) != types.True)
}
-// Cost implements the Coster interface method.
-func (ne *evalNe) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(ne.lhs, ne.rhs)
-}
-
// Function implements the InterpretableCall interface method.
func (*evalNe) Function() string {
return operators.NotEquals
@@ -400,11 +384,6 @@ func (zero *evalZeroArity) Eval(ctx Activation) ref.Val {
return zero.impl()
}
-// Cost returns 1 representing the heuristic cost of the function.
-func (zero *evalZeroArity) Cost() (min, max int64) {
- return 1, 1
-}
-
// Function implements the InterpretableCall interface method.
func (zero *evalZeroArity) Function() string {
return zero.function
@@ -456,14 +435,6 @@ func (un *evalUnary) Eval(ctx Activation) ref.Val {
return types.NewErr("no such overload: %s", un.function)
}
-// Cost implements the Coster interface method.
-func (un *evalUnary) Cost() (min, max int64) {
- min, max = estimateCost(un.arg)
- min++ // add cost for function
- max++
- return
-}
-
// Function implements the InterpretableCall interface method.
func (un *evalUnary) Function() string {
return un.function
@@ -522,11 +493,6 @@ func (bin *evalBinary) Eval(ctx Activation) ref.Val {
return types.NewErr("no such overload: %s", bin.function)
}
-// Cost implements the Coster interface method.
-func (bin *evalBinary) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(bin.lhs, bin.rhs)
-}
-
// Function implements the InterpretableCall interface method.
func (bin *evalBinary) Function() string {
return bin.function
@@ -593,14 +559,6 @@ func (fn *evalVarArgs) Eval(ctx Activation) ref.Val {
return types.NewErr("no such overload: %s", fn.function)
}
-// Cost implements the Coster interface method.
-func (fn *evalVarArgs) Cost() (min, max int64) {
- min, max = sumOfCost(fn.args)
- min++ // add cost for function
- max++
- return
-}
-
// Function implements the InterpretableCall interface method.
func (fn *evalVarArgs) Function() string {
return fn.function
@@ -617,9 +575,11 @@ func (fn *evalVarArgs) Args() []Interpretable {
}
type evalList struct {
- id int64
- elems []Interpretable
- adapter ref.TypeAdapter
+ id int64
+ elems []Interpretable
+ optionals []bool
+ hasOptionals bool
+ adapter ref.TypeAdapter
}
// ID implements the Interpretable interface method.
@@ -629,14 +589,24 @@ func (l *evalList) ID() int64 {
// Eval implements the Interpretable interface method.
func (l *evalList) Eval(ctx Activation) ref.Val {
- elemVals := make([]ref.Val, len(l.elems))
+ elemVals := make([]ref.Val, 0, len(l.elems))
// If any argument is unknown or error early terminate.
for i, elem := range l.elems {
elemVal := elem.Eval(ctx)
if types.IsUnknownOrError(elemVal) {
return elemVal
}
- elemVals[i] = elemVal
+ if l.hasOptionals && l.optionals[i] {
+ optVal, ok := elemVal.(*types.Optional)
+ if !ok {
+ return invalidOptionalElementInit(elemVal)
+ }
+ if !optVal.HasValue() {
+ continue
+ }
+ elemVal = optVal.GetValue()
+ }
+ elemVals = append(elemVals, elemVal)
}
return l.adapter.NativeToValue(elemVals)
}
@@ -649,16 +619,13 @@ func (l *evalList) Type() ref.Type {
return types.ListType
}
-// Cost implements the Coster interface method.
-func (l *evalList) Cost() (min, max int64) {
- return sumOfCost(l.elems)
-}
-
type evalMap struct {
- id int64
- keys []Interpretable
- vals []Interpretable
- adapter ref.TypeAdapter
+ id int64
+ keys []Interpretable
+ vals []Interpretable
+ optionals []bool
+ hasOptionals bool
+ adapter ref.TypeAdapter
}
// ID implements the Interpretable interface method.
@@ -679,6 +646,17 @@ func (m *evalMap) Eval(ctx Activation) ref.Val {
if types.IsUnknownOrError(valVal) {
return valVal
}
+ if m.hasOptionals && m.optionals[i] {
+ optVal, ok := valVal.(*types.Optional)
+ if !ok {
+ return invalidOptionalEntryInit(keyVal, valVal)
+ }
+ if !optVal.HasValue() {
+ delete(entries, keyVal)
+ continue
+ }
+ valVal = optVal.GetValue()
+ }
entries[keyVal] = valVal
}
return m.adapter.NativeToValue(entries)
@@ -704,19 +682,14 @@ func (m *evalMap) Type() ref.Type {
return types.MapType
}
-// Cost implements the Coster interface method.
-func (m *evalMap) Cost() (min, max int64) {
- kMin, kMax := sumOfCost(m.keys)
- vMin, vMax := sumOfCost(m.vals)
- return kMin + vMin, kMax + vMax
-}
-
type evalObj struct {
- id int64
- typeName string
- fields []string
- vals []Interpretable
- provider ref.TypeProvider
+ id int64
+ typeName string
+ fields []string
+ vals []Interpretable
+ optionals []bool
+ hasOptionals bool
+ provider ref.TypeProvider
}
// ID implements the Interpretable interface method.
@@ -733,6 +706,17 @@ func (o *evalObj) Eval(ctx Activation) ref.Val {
if types.IsUnknownOrError(val) {
return val
}
+ if o.hasOptionals && o.optionals[i] {
+ optVal, ok := val.(*types.Optional)
+ if !ok {
+ return invalidOptionalEntryInit(field, val)
+ }
+ if !optVal.HasValue() {
+ delete(fieldVals, field)
+ continue
+ }
+ val = optVal.GetValue()
+ }
fieldVals[field] = val
}
return o.provider.NewValue(o.typeName, fieldVals)
@@ -746,21 +730,6 @@ func (o *evalObj) Type() ref.Type {
return types.NewObjectTypeValue(o.typeName)
}
-// Cost implements the Coster interface method.
-func (o *evalObj) Cost() (min, max int64) {
- return sumOfCost(o.vals)
-}
-
-func sumOfCost(interps []Interpretable) (min, max int64) {
- min, max = 0, 0
- for _, in := range interps {
- minT, maxT := estimateCost(in)
- min += minT
- max += maxT
- }
- return
-}
-
type evalFold struct {
id int64
accuVar string
@@ -842,38 +811,6 @@ func (fold *evalFold) Eval(ctx Activation) ref.Val {
return res
}
-// Cost implements the Coster interface method.
-func (fold *evalFold) Cost() (min, max int64) {
- // Compute the cost for evaluating iterRange.
- iMin, iMax := estimateCost(fold.iterRange)
-
- // Compute the size of iterRange. If the size depends on the input, return the maximum possible
- // cost range.
- foldRange := fold.iterRange.Eval(EmptyActivation())
- if !foldRange.Type().HasTrait(traits.IterableType) {
- return 0, math.MaxInt64
- }
- var rangeCnt int64
- it := foldRange.(traits.Iterable).Iterator()
- for it.HasNext() == types.True {
- it.Next()
- rangeCnt++
- }
- aMin, aMax := estimateCost(fold.accu)
- cMin, cMax := estimateCost(fold.cond)
- sMin, sMax := estimateCost(fold.step)
- rMin, rMax := estimateCost(fold.result)
- if fold.exhaustive {
- cMin = cMin * rangeCnt
- sMin = sMin * rangeCnt
- }
-
- // The cond and step costs are multiplied by size(iterRange). The minimum possible cost incurs
- // when the evaluation result can be determined by the first iteration.
- return iMin + aMin + cMin + sMin + rMin,
- iMax + aMax + cMax*rangeCnt + sMax*rangeCnt + rMax
-}
-
// Optional Interpretable implementations that specialize, subsume, or extend the core evaluation
// plan via decorators.
@@ -893,17 +830,15 @@ func (e *evalSetMembership) ID() int64 {
// Eval implements the Interpretable interface method.
func (e *evalSetMembership) Eval(ctx Activation) ref.Val {
val := e.arg.Eval(ctx)
+ if types.IsUnknownOrError(val) {
+ return val
+ }
if ret, found := e.valueSet[val]; found {
return ret
}
return types.False
}
-// Cost implements the Coster interface method.
-func (e *evalSetMembership) Cost() (min, max int64) {
- return estimateCost(e.arg)
-}
-
// evalWatch is an Interpretable implementation that wraps the execution of a given
// expression so that it may observe the computed value and send it to an observer.
type evalWatch struct {
@@ -918,15 +853,10 @@ func (e *evalWatch) Eval(ctx Activation) ref.Val {
return val
}
-// Cost implements the Coster interface method.
-func (e *evalWatch) Cost() (min, max int64) {
- return estimateCost(e.Interpretable)
-}
-
-// evalWatchAttr describes a watcher of an instAttr Interpretable.
+// evalWatchAttr describes a watcher of an InterpretableAttribute Interpretable.
//
// Since the watcher may be selected against at a later stage in program planning, the watcher
-// must implement the instAttr interface by proxy.
+// must implement the InterpretableAttribute interface by proxy.
type evalWatchAttr struct {
InterpretableAttribute
observer EvalObserver
@@ -953,11 +883,6 @@ func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) {
return e, err
}
-// Cost implements the Coster interface method.
-func (e *evalWatchAttr) Cost() (min, max int64) {
- return estimateCost(e.InterpretableAttribute)
-}
-
// Eval implements the Interpretable interface method.
func (e *evalWatchAttr) Eval(vars Activation) ref.Val {
val := e.InterpretableAttribute.Eval(vars)
@@ -973,17 +898,12 @@ type evalWatchConstQual struct {
adapter ref.TypeAdapter
}
-// Cost implements the Coster interface method.
-func (e *evalWatchConstQual) Cost() (min, max int64) {
- return estimateCost(e.ConstantQualifier)
-}
-
// Qualify observes the qualification of an object via a constant boolean, int, string, or uint.
-func (e *evalWatchConstQual) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) {
out, err := e.ConstantQualifier.Qualify(vars, obj)
var val ref.Val
if err != nil {
- val = types.NewErr(err.Error())
+ val = types.WrapErr(err)
} else {
val = e.adapter.NativeToValue(out)
}
@@ -991,8 +911,25 @@ func (e *evalWatchConstQual) Qualify(vars Activation, obj interface{}) (interfac
return out, err
}
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchConstQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ out, present, err := e.ConstantQualifier.QualifyIfPresent(vars, obj, presenceOnly)
+ var val ref.Val
+ if err != nil {
+ val = types.WrapErr(err)
+ } else if out != nil {
+ val = e.adapter.NativeToValue(out)
+ } else if presenceOnly {
+ val = types.Bool(present)
+ }
+ if present || presenceOnly {
+ e.observer(e.ID(), e.ConstantQualifier, val)
+ }
+ return out, present, err
+}
+
// QualifierValueEquals tests whether the incoming value is equal to the qualifying constant.
-func (e *evalWatchConstQual) QualifierValueEquals(value interface{}) bool {
+func (e *evalWatchConstQual) QualifierValueEquals(value any) bool {
qve, ok := e.ConstantQualifier.(qualifierValueEquator)
return ok && qve.QualifierValueEquals(value)
}
@@ -1004,17 +941,12 @@ type evalWatchQual struct {
adapter ref.TypeAdapter
}
-// Cost implements the Coster interface method.
-func (e *evalWatchQual) Cost() (min, max int64) {
- return estimateCost(e.Qualifier)
-}
-
// Qualify observes the qualification of an object via a value computed at runtime.
-func (e *evalWatchQual) Qualify(vars Activation, obj interface{}) (interface{}, error) {
+func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) {
out, err := e.Qualifier.Qualify(vars, obj)
var val ref.Val
if err != nil {
- val = types.NewErr(err.Error())
+ val = types.WrapErr(err)
} else {
val = e.adapter.NativeToValue(out)
}
@@ -1022,6 +954,23 @@ func (e *evalWatchQual) Qualify(vars Activation, obj interface{}) (interface{},
return out, err
}
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+ out, present, err := e.Qualifier.QualifyIfPresent(vars, obj, presenceOnly)
+ var val ref.Val
+ if err != nil {
+ val = types.WrapErr(err)
+ } else if out != nil {
+ val = e.adapter.NativeToValue(out)
+ } else if presenceOnly {
+ val = types.Bool(present)
+ }
+ if present || presenceOnly {
+ e.observer(e.ID(), e.Qualifier, val)
+ }
+ return out, present, err
+}
+
// evalWatchConst describes a watcher of an instConst Interpretable.
type evalWatchConst struct {
InterpretableConst
@@ -1035,11 +984,6 @@ func (e *evalWatchConst) Eval(vars Activation) ref.Val {
return val
}
-// Cost implements the Coster interface method.
-func (e *evalWatchConst) Cost() (min, max int64) {
- return estimateCost(e.InterpretableConst)
-}
-
// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation.
type evalExhaustiveOr struct {
id int64
@@ -1078,12 +1022,7 @@ func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val {
if types.IsError(lVal) {
return lVal
}
- return types.ValOrErr(rVal, "no such overload")
-}
-
-// Cost implements the Coster interface method.
-func (or *evalExhaustiveOr) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(or.lhs, or.rhs)
+ return types.MaybeNoSuchOverloadErr(rVal)
}
// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation.
@@ -1124,18 +1063,7 @@ func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val {
if types.IsError(lVal) {
return lVal
}
- return types.ValOrErr(rVal, "no such overload")
-}
-
-// Cost implements the Coster interface method.
-func (and *evalExhaustiveAnd) Cost() (min, max int64) {
- return calExhaustiveBinaryOpsCost(and.lhs, and.rhs)
-}
-
-func calExhaustiveBinaryOpsCost(lhs, rhs Interpretable) (min, max int64) {
- lMin, lMax := estimateCost(lhs)
- rMin, rMax := estimateCost(rhs)
- return lMin + rMin + 1, lMax + rMax + 1
+ return types.MaybeNoSuchOverloadErr(rVal)
}
// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument
@@ -1154,77 +1082,114 @@ func (cond *evalExhaustiveConditional) ID() int64 {
// Eval implements the Interpretable interface method.
func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val {
cVal := cond.attr.expr.Eval(ctx)
- tVal, err := cond.attr.truthy.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
- fVal, err := cond.attr.falsy.Resolve(ctx)
- if err != nil {
- return types.NewErr(err.Error())
- }
+ tVal, tErr := cond.attr.truthy.Resolve(ctx)
+ fVal, fErr := cond.attr.falsy.Resolve(ctx)
cBool, ok := cVal.(types.Bool)
if !ok {
return types.ValOrErr(cVal, "no such overload")
}
if cBool {
+ if tErr != nil {
+ return types.WrapErr(tErr)
+ }
return cond.adapter.NativeToValue(tVal)
}
+ if fErr != nil {
+ return types.WrapErr(fErr)
+ }
return cond.adapter.NativeToValue(fVal)
}
-// Cost implements the Coster interface method.
-func (cond *evalExhaustiveConditional) Cost() (min, max int64) {
- return cond.attr.Cost()
-}
-
// evalAttr evaluates an Attribute value.
type evalAttr struct {
- adapter ref.TypeAdapter
- attr Attribute
+ adapter ref.TypeAdapter
+ attr Attribute
+ optional bool
}
+var _ InterpretableAttribute = &evalAttr{}
+
// ID of the attribute instruction.
func (a *evalAttr) ID() int64 {
return a.attr.ID()
}
-// AddQualifier implements the instAttr interface method.
+// AddQualifier implements the InterpretableAttribute interface method.
func (a *evalAttr) AddQualifier(qual Qualifier) (Attribute, error) {
attr, err := a.attr.AddQualifier(qual)
a.attr = attr
return attr, err
}
-// Attr implements the instAttr interface method.
+// Attr implements the InterpretableAttribute interface method.
func (a *evalAttr) Attr() Attribute {
return a.attr
}
-// Adapter implements the instAttr interface method.
+// Adapter implements the InterpretableAttribute interface method.
func (a *evalAttr) Adapter() ref.TypeAdapter {
return a.adapter
}
-// Cost implements the Coster interface method.
-func (a *evalAttr) Cost() (min, max int64) {
- return estimateCost(a.attr)
-}
-
// Eval implements the Interpretable interface method.
func (a *evalAttr) Eval(ctx Activation) ref.Val {
v, err := a.attr.Resolve(ctx)
if err != nil {
- return types.NewErr(err.Error())
+ return types.WrapErr(err)
}
return a.adapter.NativeToValue(v)
}
// Qualify proxies to the Attribute's Qualify method.
-func (a *evalAttr) Qualify(ctx Activation, obj interface{}) (interface{}, error) {
+func (a *evalAttr) Qualify(ctx Activation, obj any) (any, error) {
return a.attr.Qualify(ctx, obj)
}
+// QualifyIfPresent proxies to the Attribute's QualifyIfPresent method.
+func (a *evalAttr) QualifyIfPresent(ctx Activation, obj any, presenceOnly bool) (any, bool, error) {
+ return a.attr.QualifyIfPresent(ctx, obj, presenceOnly)
+}
+
+func (a *evalAttr) IsOptional() bool {
+ return a.optional
+}
+
// Resolve proxies to the Attribute's Resolve method.
-func (a *evalAttr) Resolve(ctx Activation) (interface{}, error) {
+func (a *evalAttr) Resolve(ctx Activation) (any, error) {
return a.attr.Resolve(ctx)
}
+
+type evalWatchConstructor struct {
+ constructor InterpretableConstructor
+ observer EvalObserver
+}
+
+// InitVals implements the InterpretableConstructor InitVals function.
+func (c *evalWatchConstructor) InitVals() []Interpretable {
+ return c.constructor.InitVals()
+}
+
+// Type implements the InterpretableConstructor Type function.
+func (c *evalWatchConstructor) Type() ref.Type {
+ return c.constructor.Type()
+}
+
+// ID implements the Interpretable ID function.
+func (c *evalWatchConstructor) ID() int64 {
+ return c.constructor.ID()
+}
+
+// Eval implements the Interpretable Eval function.
+func (c *evalWatchConstructor) Eval(ctx Activation) ref.Val {
+ val := c.constructor.Eval(ctx)
+ c.observer(c.ID(), c.constructor, val)
+ return val
+}
+
+func invalidOptionalEntryInit(field any, value ref.Val) ref.Val {
+ return types.NewErr("cannot initialize optional entry '%v' from non-optional value %v", field, value)
+}
+
+func invalidOptionalElementInit(value ref.Val) ref.Val {
+ return types.NewErr("cannot initialize optional list element from non-optional value %v", value)
+}
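
To make the optional-aware construction rule in this file concrete, the self-contained sketch below mirrors what evalList.Eval now does with its optionals bitmap: a position flagged optional must evaluate to an optional value, an absent optional contributes nothing, a present one is unwrapped, and every other position is kept as-is. The opt type and buildList helper are illustrative stand-ins, not cel-go APIs.

package main

import "fmt"

// opt is a stand-in for cel-go's optional value type: a value that may be absent.
type opt struct {
	val any
	set bool
}

func ofValue(v any) opt { return opt{val: v, set: true} }
func none() opt         { return opt{} }

// buildList applies the evalList rule: optionals[i] marks positions whose value
// must be an opt; absent optionals are skipped, present ones are unwrapped, and
// all remaining positions are appended unchanged.
func buildList(elems []any, optionals []bool) ([]any, error) {
	out := make([]any, 0, len(elems))
	for i, e := range elems {
		if optionals[i] {
			o, ok := e.(opt)
			if !ok {
				return nil, fmt.Errorf("cannot initialize optional list element from non-optional value %v", e)
			}
			if !o.set {
				continue // optional without a value adds no element
			}
			e = o.val
		}
		out = append(out, e)
	}
	return out, nil
}

func main() {
	got, err := buildList(
		[]any{1, none(), ofValue(3)},
		[]bool{false, true, true},
	)
	fmt.Println(got, err) // [1 3] <nil>
}
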
diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go
index b3fd14f8b3..707a6105a1 100644
--- a/vendor/github.com/google/cel-go/interpreter/interpreter.go
+++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go
@@ -29,19 +29,17 @@ import (
type Interpreter interface {
// NewInterpretable creates an Interpretable from a checked expression and an
// optional list of InterpretableDecorator values.
- NewInterpretable(checked *exprpb.CheckedExpr,
- decorators ...InterpretableDecorator) (Interpretable, error)
+ NewInterpretable(checked *exprpb.CheckedExpr, decorators ...InterpretableDecorator) (Interpretable, error)
// NewUncheckedInterpretable returns an Interpretable from a parsed expression
// and an optional list of InterpretableDecorator values.
- NewUncheckedInterpretable(expr *exprpb.Expr,
- decorators ...InterpretableDecorator) (Interpretable, error)
+ NewUncheckedInterpretable(expr *exprpb.Expr, decorators ...InterpretableDecorator) (Interpretable, error)
}
// EvalObserver is a functional interface that accepts an expression id and an observed value.
// The id identifies the expression that was evaluated, the programStep is the Interpretable or Qualifier that
// was evaluated and value is the result of the evaluation.
-type EvalObserver func(id int64, programStep interface{}, value ref.Val)
+type EvalObserver func(id int64, programStep any, value ref.Val)
// Observe constructs a decorator that calls all the provided observers in order after evaluating each Interpretable
// or Qualifier during program evaluation.
@@ -49,7 +47,7 @@ func Observe(observers ...EvalObserver) InterpretableDecorator {
if len(observers) == 1 {
return decObserveEval(observers[0])
}
- observeFn := func(id int64, programStep interface{}, val ref.Val) {
+ observeFn := func(id int64, programStep any, val ref.Val) {
for _, observer := range observers {
observer(id, programStep, val)
}
@@ -96,7 +94,7 @@ func TrackState(state EvalState) InterpretableDecorator {
// This decorator is not thread-safe, and the EvalState must be reset between Eval()
// calls.
func EvalStateObserver(state EvalState) EvalObserver {
- return func(id int64, programStep interface{}, val ref.Val) {
+ return func(id int64, programStep any, val ref.Val) {
state.SetValue(id, val)
}
}
diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go
index 882e0419a5..0b65d0fa90 100644
--- a/vendor/github.com/google/cel-go/interpreter/planner.go
+++ b/vendor/github.com/google/cel-go/interpreter/planner.go
@@ -20,7 +20,6 @@ import (
"github.com/google/cel-go/common/containers"
"github.com/google/cel-go/common/operators"
- "github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/interpreter/functions"
@@ -189,16 +188,7 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) {
if err != nil {
return nil, err
}
-
- // Determine the field type if this is a proto message type.
- var fieldType *ref.FieldType
opType := p.typeMap[sel.GetOperand().GetId()]
- if opType.GetMessageType() != "" {
- ft, found := p.provider.FindFieldType(opType.GetMessageType(), sel.GetField())
- if found && ft.IsSet != nil && ft.GetFrom != nil {
- fieldType = ft
- }
- }
// If the Select was marked TestOnly, this is a presence test.
//
@@ -211,37 +201,31 @@ func (p *planner) planSelect(expr *exprpb.Expr) (Interpretable, error) {
// If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`,
// it is not clear whether has should error or follow the convention defined for structured
// values.
- if sel.TestOnly {
- // Return the test only eval expression.
- return &evalTestOnly{
- id: expr.GetId(),
- field: types.String(sel.GetField()),
- fieldType: fieldType,
- op: op,
- }, nil
- }
- // Build a qualifier.
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), sel.GetField())
- if err != nil {
- return nil, err
- }
- // Lastly, create a field selection Interpretable.
+
+ // Establish the attribute reference.
attr, isAttr := op.(InterpretableAttribute)
- if isAttr {
- _, err = attr.AddQualifier(qual)
- return attr, err
+ if !isAttr {
+ attr, err = p.relativeAttr(op.ID(), op, false)
+ if err != nil {
+ return nil, err
+ }
}
- relAttr, err := p.relativeAttr(op.ID(), op)
+ // Build a qualifier for the attribute.
+ qual, err := p.attrFactory.NewQualifier(opType, expr.GetId(), sel.GetField(), false)
if err != nil {
return nil, err
}
- _, err = relAttr.AddQualifier(qual)
- if err != nil {
- return nil, err
+ // Modify the attribute to be test-only.
+ if sel.GetTestOnly() {
+ attr = &evalTestOnly{
+ id: expr.GetId(),
+ InterpretableAttribute: attr,
+ }
}
- return relAttr, nil
+ // Append the qualifier on the attribute.
+ _, err = attr.AddQualifier(qual)
+ return attr, err
}
// planCall creates a callable Interpretable while specializing for common functions and invocation
@@ -286,7 +270,9 @@ func (p *planner) planCall(expr *exprpb.Expr) (Interpretable, error) {
case operators.NotEquals:
return p.planCallNotEqual(expr, args)
case operators.Index:
- return p.planCallIndex(expr, args)
+ return p.planCallIndex(expr, args, false)
+ case operators.OptSelect, operators.OptIndex:
+ return p.planCallIndex(expr, args, true)
}
// Otherwise, generate Interpretable calls specialized by argument count.
@@ -423,8 +409,7 @@ func (p *planner) planCallVarArgs(expr *exprpb.Expr,
}
// planCallEqual generates an equals (==) Interpretable.
-func (p *planner) planCallEqual(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalEq{
id: expr.GetId(),
lhs: args[0],
@@ -433,8 +418,7 @@ func (p *planner) planCallEqual(expr *exprpb.Expr,
}
// planCallNotEqual generates a not equals (!=) Interpretable.
-func (p *planner) planCallNotEqual(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallNotEqual(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalNe{
id: expr.GetId(),
lhs: args[0],
@@ -443,8 +427,7 @@ func (p *planner) planCallNotEqual(expr *exprpb.Expr,
}
// planCallLogicalAnd generates a logical and (&&) Interpretable.
-func (p *planner) planCallLogicalAnd(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalAnd{
id: expr.GetId(),
lhs: args[0],
@@ -453,8 +436,7 @@ func (p *planner) planCallLogicalAnd(expr *exprpb.Expr,
}
// planCallLogicalOr generates a logical or (||) Interpretable.
-func (p *planner) planCallLogicalOr(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallLogicalOr(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
return &evalOr{
id: expr.GetId(),
lhs: args[0],
@@ -463,10 +445,8 @@ func (p *planner) planCallLogicalOr(expr *exprpb.Expr,
}
// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable.
-func (p *planner) planCallConditional(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) {
cond := args[0]
-
t := args[1]
var tAttr Attribute
truthyAttr, isTruthyAttr := t.(InterpretableAttribute)
@@ -493,48 +473,54 @@ func (p *planner) planCallConditional(expr *exprpb.Expr,
// planCallIndex either extends an attribute with the argument to the index operation, or creates
// a relative attribute based on the return of a function call or operation.
-func (p *planner) planCallIndex(expr *exprpb.Expr,
- args []Interpretable) (Interpretable, error) {
+func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optional bool) (Interpretable, error) {
op := args[0]
ind := args[1]
- opAttr, err := p.relativeAttr(op.ID(), op)
- if err != nil {
- return nil, err
- }
opType := p.typeMap[expr.GetCallExpr().GetTarget().GetId()]
- indConst, isIndConst := ind.(InterpretableConst)
- if isIndConst {
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), indConst.Value())
+
+ // Establish the attribute reference.
+ var err error
+ attr, isAttr := op.(InterpretableAttribute)
+ if !isAttr {
+ attr, err = p.relativeAttr(op.ID(), op, false)
if err != nil {
return nil, err
}
- _, err = opAttr.AddQualifier(qual)
- return opAttr, err
}
- indAttr, isIndAttr := ind.(InterpretableAttribute)
- if isIndAttr {
- qual, err := p.attrFactory.NewQualifier(
- opType, expr.GetId(), indAttr)
- if err != nil {
- return nil, err
- }
- _, err = opAttr.AddQualifier(qual)
- return opAttr, err
+
+ // Construct the qualifier type.
+ var qual Qualifier
+ switch ind := ind.(type) {
+ case InterpretableConst:
+ qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind.Value(), optional)
+ case InterpretableAttribute:
+ qual, err = p.attrFactory.NewQualifier(opType, expr.GetId(), ind, optional)
+ default:
+ qual, err = p.relativeAttr(expr.GetId(), ind, optional)
}
- indQual, err := p.relativeAttr(expr.GetId(), ind)
if err != nil {
return nil, err
}
- _, err = opAttr.AddQualifier(indQual)
- return opAttr, err
+
+ // Add the qualifier to the attribute
+ _, err = attr.AddQualifier(qual)
+ return attr, err
}
// planCreateList generates a list construction Interpretable.
func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) {
list := expr.GetListExpr()
- elems := make([]Interpretable, len(list.GetElements()))
- for i, elem := range list.GetElements() {
+ optionalIndices := list.GetOptionalIndices()
+ elements := list.GetElements()
+ optionals := make([]bool, len(elements))
+ for _, index := range optionalIndices {
+ if index < 0 || index >= int32(len(elements)) {
+ return nil, fmt.Errorf("optional index %d out of element bounds [0, %d]", index, len(elements))
+ }
+ optionals[index] = true
+ }
+ elems := make([]Interpretable, len(elements))
+ for i, elem := range elements {
elemVal, err := p.Plan(elem)
if err != nil {
return nil, err
@@ -542,9 +528,11 @@ func (p *planner) planCreateList(expr *exprpb.Expr) (Interpretable, error) {
elems[i] = elemVal
}
return &evalList{
- id: expr.GetId(),
- elems: elems,
- adapter: p.adapter,
+ id: expr.GetId(),
+ elems: elems,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ adapter: p.adapter,
}, nil
}
@@ -555,6 +543,7 @@ func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) {
return p.planCreateObj(expr)
}
entries := str.GetEntries()
+ optionals := make([]bool, len(entries))
keys := make([]Interpretable, len(entries))
vals := make([]Interpretable, len(entries))
for i, entry := range entries {
@@ -569,23 +558,27 @@ func (p *planner) planCreateStruct(expr *exprpb.Expr) (Interpretable, error) {
return nil, err
}
vals[i] = valVal
+ optionals[i] = entry.GetOptionalEntry()
}
return &evalMap{
- id: expr.GetId(),
- keys: keys,
- vals: vals,
- adapter: p.adapter,
+ id: expr.GetId(),
+ keys: keys,
+ vals: vals,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ adapter: p.adapter,
}, nil
}
// planCreateObj generates an object construction Interpretable.
func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) {
obj := expr.GetStructExpr()
- typeName, defined := p.resolveTypeName(obj.MessageName)
+ typeName, defined := p.resolveTypeName(obj.GetMessageName())
if !defined {
- return nil, fmt.Errorf("unknown type: %s", typeName)
+ return nil, fmt.Errorf("unknown type: %s", obj.GetMessageName())
}
entries := obj.GetEntries()
+ optionals := make([]bool, len(entries))
fields := make([]string, len(entries))
vals := make([]Interpretable, len(entries))
for i, entry := range entries {
@@ -595,13 +588,16 @@ func (p *planner) planCreateObj(expr *exprpb.Expr) (Interpretable, error) {
return nil, err
}
vals[i] = val
+ optionals[i] = entry.GetOptionalEntry()
}
return &evalObj{
- id: expr.GetId(),
- typeName: typeName,
- fields: fields,
- vals: vals,
- provider: p.provider,
+ id: expr.GetId(),
+ typeName: typeName,
+ fields: fields,
+ vals: vals,
+ optionals: optionals,
+ hasOptionals: len(optionals) != 0,
+ provider: p.provider,
}, nil
}
@@ -753,14 +749,18 @@ func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, stri
return target, fnName, ""
}
-func (p *planner) relativeAttr(id int64, eval Interpretable) (InterpretableAttribute, error) {
+// relativeAttr indicates that the attribute in this case acts as a qualifier and as such needs to
+// be observed to ensure that its evaluated value is properly recorded for state tracking.
+func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (InterpretableAttribute, error) {
eAttr, ok := eval.(InterpretableAttribute)
if !ok {
eAttr = &evalAttr{
- adapter: p.adapter,
- attr: p.attrFactory.RelativeAttribute(id, eval),
+ adapter: p.adapter,
+ attr: p.attrFactory.RelativeAttribute(id, eval),
+ optional: opt,
}
}
+ // This looks like it should either decorate the new evalAttr node, or early return the InterpretableAttribute
decAttr, err := p.decorate(eAttr, nil)
if err != nil {
return nil, err
diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go
index eab46e0c06..d1b5d6bd6b 100644
--- a/vendor/github.com/google/cel-go/interpreter/prune.go
+++ b/vendor/github.com/google/cel-go/interpreter/prune.go
@@ -16,6 +16,7 @@ package interpreter
import (
"github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/overloads"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
@@ -26,6 +27,7 @@ import (
type astPruner struct {
expr *exprpb.Expr
+ macroCalls map[int64]*exprpb.Expr
state EvalState
nextExprID int64
}
@@ -65,13 +67,22 @@ type astPruner struct {
// compiled and constant folded expressions, but is not willing to constant
// fold(and thus cache results of) some external calls, then they can prepare
// the overloads accordingly.
-func PruneAst(expr *exprpb.Expr, state EvalState) *exprpb.Expr {
+func PruneAst(expr *exprpb.Expr, macroCalls map[int64]*exprpb.Expr, state EvalState) *exprpb.ParsedExpr {
+ pruneState := NewEvalState()
+ for _, id := range state.IDs() {
+ v, _ := state.Value(id)
+ pruneState.SetValue(id, v)
+ }
pruner := &astPruner{
expr: expr,
- state: state,
- nextExprID: 1}
- newExpr, _ := pruner.prune(expr)
- return newExpr
+ macroCalls: macroCalls,
+ state: pruneState,
+ nextExprID: getMaxID(expr)}
+ newExpr, _ := pruner.maybePrune(expr)
+ return &exprpb.ParsedExpr{
+ Expr: newExpr,
+ SourceInfo: &exprpb.SourceInfo{MacroCalls: pruner.macroCalls},
+ }
}
func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr {
@@ -84,28 +95,50 @@ func (p *astPruner) createLiteral(id int64, val *exprpb.Constant) *exprpb.Expr {
}
func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, bool) {
- switch val.Type() {
- case types.BoolType:
+ switch v := val.(type) {
+ case types.Bool:
+ p.state.SetValue(id, val)
return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: val.Value().(bool)}}), true
- case types.IntType:
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: bool(v)}}), true
+ case types.Bytes:
+ p.state.SetValue(id, val)
return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: val.Value().(int64)}}), true
- case types.UintType:
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: []byte(v)}}), true
+ case types.Double:
+ p.state.SetValue(id, val)
return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: val.Value().(uint64)}}), true
- case types.StringType:
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: float64(v)}}), true
+ case types.Duration:
+ p.state.SetValue(id, val)
+ durationString := string(v.ConvertToType(types.StringType).(types.String))
+ return &exprpb.Expr{
+ Id: id,
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: &exprpb.Expr_Call{
+ Function: overloads.TypeConvertDuration,
+ Args: []*exprpb.Expr{
+ p.createLiteral(p.nextID(),
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: durationString}}),
+ },
+ },
+ },
+ }, true
+ case types.Int:
+ p.state.SetValue(id, val)
return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: val.Value().(string)}}), true
- case types.DoubleType:
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: int64(v)}}), true
+ case types.Uint:
+ p.state.SetValue(id, val)
return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: val.Value().(float64)}}), true
- case types.BytesType:
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: uint64(v)}}), true
+ case types.String:
+ p.state.SetValue(id, val)
return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: val.Value().([]byte)}}), true
- case types.NullType:
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: string(v)}}), true
+ case types.Null:
+ p.state.SetValue(id, val)
return p.createLiteral(id,
- &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: val.Value().(structpb.NullValue)}}), true
+ &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: v.Value().(structpb.NullValue)}}), true
}
// Attempt to build a list literal.
@@ -123,6 +156,7 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo
}
elemExprs[i] = elemExpr
}
+ p.state.SetValue(id, val)
return &exprpb.Expr{
Id: id,
ExprKind: &exprpb.Expr_ListExpr{
@@ -162,6 +196,7 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo
entries[i] = entry
i++
}
+ p.state.SetValue(id, val)
return &exprpb.Expr{
Id: id,
ExprKind: &exprpb.Expr_StructExpr{
@@ -177,70 +212,147 @@ func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (*exprpb.Expr, boo
return nil, false
}
-func (p *astPruner) maybePruneAndOr(node *exprpb.Expr) (*exprpb.Expr, bool) {
- if !p.existsWithUnknownValue(node.GetId()) {
+func (p *astPruner) maybePruneOptional(elem *exprpb.Expr) (*exprpb.Expr, bool) {
+ elemVal, found := p.value(elem.GetId())
+ if found && elemVal.Type() == types.OptionalType {
+ opt := elemVal.(*types.Optional)
+ if !opt.HasValue() {
+ return nil, true
+ }
+ if newElem, pruned := p.maybeCreateLiteral(elem.GetId(), opt.GetValue()); pruned {
+ return newElem, true
+ }
+ }
+ return elem, false
+}
+
+func (p *astPruner) maybePruneIn(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ // elem in list
+ call := node.GetCallExpr()
+ val, exists := p.maybeValue(call.GetArgs()[1].GetId())
+ if !exists {
return nil, false
}
+ if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero {
+ return p.maybeCreateLiteral(node.GetId(), types.False)
+ }
+ return nil, false
+}
+func (p *astPruner) maybePruneLogicalNot(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ arg := call.GetArgs()[0]
+ val, exists := p.maybeValue(arg.GetId())
+ if !exists {
+ return nil, false
+ }
+ if b, ok := val.(types.Bool); ok {
+ return p.maybeCreateLiteral(node.GetId(), !b)
+ }
+ return nil, false
+}
+
+func (p *astPruner) maybePruneOr(node *exprpb.Expr) (*exprpb.Expr, bool) {
call := node.GetCallExpr()
// We know result is unknown, so we have at least one unknown arg
// and if one side is a known value, we know we can ignore it.
- if p.existsWithKnownValue(call.Args[0].GetId()) {
- return call.Args[1], true
+ if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists {
+ if v == types.True {
+ return p.maybeCreateLiteral(node.GetId(), types.True)
+ }
+ return call.GetArgs()[1], true
}
- if p.existsWithKnownValue(call.Args[1].GetId()) {
- return call.Args[0], true
+ if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists {
+ if v == types.True {
+ return p.maybeCreateLiteral(node.GetId(), types.True)
+ }
+ return call.GetArgs()[0], true
}
return nil, false
}
-func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) {
- if !p.existsWithUnknownValue(node.GetId()) {
- return nil, false
+func (p *astPruner) maybePruneAnd(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ call := node.GetCallExpr()
+ // We know result is unknown, so we have at least one unknown arg
+ // and if one side is a known value, we know we can ignore it.
+ if v, exists := p.maybeValue(call.GetArgs()[0].GetId()); exists {
+ if v == types.False {
+ return p.maybeCreateLiteral(node.GetId(), types.False)
+ }
+ return call.GetArgs()[1], true
+ }
+ if v, exists := p.maybeValue(call.GetArgs()[1].GetId()); exists {
+ if v == types.False {
+ return p.maybeCreateLiteral(node.GetId(), types.False)
+ }
+ return call.GetArgs()[0], true
}
+ return nil, false
+}
+func (p *astPruner) maybePruneConditional(node *exprpb.Expr) (*exprpb.Expr, bool) {
call := node.GetCallExpr()
- condVal, condValueExists := p.value(call.Args[0].GetId())
- if !condValueExists || types.IsUnknownOrError(condVal) {
+ cond, exists := p.maybeValue(call.GetArgs()[0].GetId())
+ if !exists {
return nil, false
}
-
- if condVal.Value().(bool) {
- return call.Args[1], true
+ if cond.Value().(bool) {
+ return call.GetArgs()[1], true
}
- return call.Args[2], true
+ return call.GetArgs()[2], true
}
func (p *astPruner) maybePruneFunction(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ if _, exists := p.value(node.GetId()); !exists {
+ return nil, false
+ }
call := node.GetCallExpr()
- if call.Function == operators.LogicalOr || call.Function == operators.LogicalAnd {
- return p.maybePruneAndOr(node)
+ if call.Function == operators.LogicalOr {
+ return p.maybePruneOr(node)
+ }
+ if call.Function == operators.LogicalAnd {
+ return p.maybePruneAnd(node)
}
if call.Function == operators.Conditional {
return p.maybePruneConditional(node)
}
-
+ if call.Function == operators.In {
+ return p.maybePruneIn(node)
+ }
+ if call.Function == operators.LogicalNot {
+ return p.maybePruneLogicalNot(node)
+ }
return nil, false
}
+func (p *astPruner) maybePrune(node *exprpb.Expr) (*exprpb.Expr, bool) {
+ return p.prune(node)
+}
+
func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
if node == nil {
return node, false
}
- val, valueExists := p.value(node.GetId())
- if valueExists && !types.IsUnknownOrError(val) {
+ val, valueExists := p.maybeValue(node.GetId())
+ if valueExists {
if newNode, ok := p.maybeCreateLiteral(node.GetId(), val); ok {
+ delete(p.macroCalls, node.GetId())
return newNode, true
}
}
+ if macro, found := p.macroCalls[node.GetId()]; found {
+ // prune the expression in terms of the macro call instead of the expanded form.
+ if newMacro, pruned := p.prune(macro); pruned {
+ p.macroCalls[node.GetId()] = newMacro
+ }
+ }
// We have either an unknown/error value, or something we don't want to
// transform, or expression was not evaluated. If possible, drill down
// more.
-
switch node.GetExprKind().(type) {
case *exprpb.Expr_SelectExpr:
- if operand, pruned := p.prune(node.GetSelectExpr().GetOperand()); pruned {
+ if operand, pruned := p.maybePrune(node.GetSelectExpr().GetOperand()); pruned {
return &exprpb.Expr{
Id: node.GetId(),
ExprKind: &exprpb.Expr_SelectExpr{
@@ -253,10 +365,6 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
}, true
}
case *exprpb.Expr_CallExpr:
- if newExpr, pruned := p.maybePruneFunction(node); pruned {
- newExpr, _ = p.prune(newExpr)
- return newExpr, true
- }
var prunedCall bool
call := node.GetCallExpr()
args := call.GetArgs()
@@ -268,40 +376,75 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
}
for i, arg := range args {
newArgs[i] = arg
- if newArg, prunedArg := p.prune(arg); prunedArg {
+ if newArg, prunedArg := p.maybePrune(arg); prunedArg {
prunedCall = true
newArgs[i] = newArg
}
}
- if newTarget, prunedTarget := p.prune(call.GetTarget()); prunedTarget {
+ if newTarget, prunedTarget := p.maybePrune(call.GetTarget()); prunedTarget {
prunedCall = true
newCall.Target = newTarget
}
+ newNode := &exprpb.Expr{
+ Id: node.GetId(),
+ ExprKind: &exprpb.Expr_CallExpr{
+ CallExpr: newCall,
+ },
+ }
+ if newExpr, pruned := p.maybePruneFunction(newNode); pruned {
+ newExpr, _ = p.maybePrune(newExpr)
+ return newExpr, true
+ }
if prunedCall {
- return &exprpb.Expr{
- Id: node.GetId(),
- ExprKind: &exprpb.Expr_CallExpr{
- CallExpr: newCall,
- },
- }, true
+ return newNode, true
}
case *exprpb.Expr_ListExpr:
elems := node.GetListExpr().GetElements()
- newElems := make([]*exprpb.Expr, len(elems))
+ optIndices := node.GetListExpr().GetOptionalIndices()
+ optIndexMap := map[int32]bool{}
+ for _, i := range optIndices {
+ optIndexMap[i] = true
+ }
+ newOptIndexMap := make(map[int32]bool, len(optIndexMap))
+ newElems := make([]*exprpb.Expr, 0, len(elems))
var prunedList bool
+
+ prunedIdx := 0
for i, elem := range elems {
- newElems[i] = elem
- if newElem, prunedElem := p.prune(elem); prunedElem {
- newElems[i] = newElem
+ _, isOpt := optIndexMap[int32(i)]
+ if isOpt {
+ newElem, pruned := p.maybePruneOptional(elem)
+ if pruned {
+ prunedList = true
+ if newElem != nil {
+ newElems = append(newElems, newElem)
+ prunedIdx++
+ }
+ continue
+ }
+ newOptIndexMap[int32(prunedIdx)] = true
+ }
+ if newElem, prunedElem := p.maybePrune(elem); prunedElem {
+ newElems = append(newElems, newElem)
prunedList = true
+ } else {
+ newElems = append(newElems, elem)
}
+ prunedIdx++
+ }
+ optIndices = make([]int32, len(newOptIndexMap))
+ idx := 0
+ for i := range newOptIndexMap {
+ optIndices[idx] = i
+ idx++
}
if prunedList {
return &exprpb.Expr{
Id: node.GetId(),
ExprKind: &exprpb.Expr_ListExpr{
ListExpr: &exprpb.Expr_CreateList{
- Elements: newElems,
+ Elements: newElems,
+ OptionalIndices: optIndices,
},
},
}, true
@@ -313,8 +456,8 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
newEntries := make([]*exprpb.Expr_CreateStruct_Entry, len(entries))
for i, entry := range entries {
newEntries[i] = entry
- newKey, prunedKey := p.prune(entry.GetMapKey())
- newValue, prunedValue := p.prune(entry.GetValue())
+ newKey, prunedKey := p.maybePrune(entry.GetMapKey())
+ newValue, prunedValue := p.maybePrune(entry.GetValue())
if !prunedKey && !prunedValue {
continue
}
@@ -331,6 +474,7 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
MapKey: newKey,
}
}
+ newEntry.OptionalEntry = entry.GetOptionalEntry()
newEntries[i] = newEntry
}
if prunedStruct {
@@ -344,27 +488,6 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) {
},
}, true
}
- case *exprpb.Expr_ComprehensionExpr:
- compre := node.GetComprehensionExpr()
- // Only the range of the comprehension is pruned since the state tracking only records
- // the last iteration of the comprehension and not each step in the evaluation which
- // means that the any residuals computed in between might be inaccurate.
- if newRange, pruned := p.prune(compre.GetIterRange()); pruned {
- return &exprpb.Expr{
- Id: node.GetId(),
- ExprKind: &exprpb.Expr_ComprehensionExpr{
- ComprehensionExpr: &exprpb.Expr_Comprehension{
- IterVar: compre.GetIterVar(),
- IterRange: newRange,
- AccuVar: compre.GetAccuVar(),
- AccuInit: compre.GetAccuInit(),
- LoopCondition: compre.GetLoopCondition(),
- LoopStep: compre.GetLoopStep(),
- Result: compre.GetResult(),
- },
- },
- }, true
- }
}
return node, false
}
@@ -374,24 +497,82 @@ func (p *astPruner) value(id int64) (ref.Val, bool) {
return val, (found && val != nil)
}
-func (p *astPruner) existsWithUnknownValue(id int64) bool {
- val, valueExists := p.value(id)
- return valueExists && types.IsUnknown(val)
+func (p *astPruner) maybeValue(id int64) (ref.Val, bool) {
+ val, found := p.value(id)
+ if !found || types.IsUnknownOrError(val) {
+ return nil, false
+ }
+ return val, true
}
-func (p *astPruner) existsWithKnownValue(id int64) bool {
- val, valueExists := p.value(id)
- return valueExists && !types.IsUnknown(val)
+func (p *astPruner) nextID() int64 {
+ next := p.nextExprID
+ p.nextExprID++
+ return next
}
-func (p *astPruner) nextID() int64 {
- for {
- _, found := p.state.Value(p.nextExprID)
- if !found {
- next := p.nextExprID
- p.nextExprID++
- return next
+type astVisitor struct {
+ // visitExpr is called on every expr node, including those within a map/struct entry.
+ visitExpr func(expr *exprpb.Expr)
+ // visitEntry is called before entering the key, value of a map/struct entry.
+ visitEntry func(entry *exprpb.Expr_CreateStruct_Entry)
+}
+
+func getMaxID(expr *exprpb.Expr) int64 {
+ maxID := int64(1)
+ visit(expr, maxIDVisitor(&maxID))
+ return maxID
+}
+
+func maxIDVisitor(maxID *int64) astVisitor {
+ return astVisitor{
+ visitExpr: func(e *exprpb.Expr) {
+ if e.GetId() >= *maxID {
+ *maxID = e.GetId() + 1
+ }
+ },
+ visitEntry: func(e *exprpb.Expr_CreateStruct_Entry) {
+ if e.GetId() >= *maxID {
+ *maxID = e.GetId() + 1
+ }
+ },
+ }
+}
+
+func visit(expr *exprpb.Expr, visitor astVisitor) {
+ exprs := []*exprpb.Expr{expr}
+ for len(exprs) != 0 {
+ e := exprs[0]
+ visitor.visitExpr(e)
+ exprs = exprs[1:]
+ switch e.GetExprKind().(type) {
+ case *exprpb.Expr_SelectExpr:
+ exprs = append(exprs, e.GetSelectExpr().GetOperand())
+ case *exprpb.Expr_CallExpr:
+ call := e.GetCallExpr()
+ if call.GetTarget() != nil {
+ exprs = append(exprs, call.GetTarget())
+ }
+ exprs = append(exprs, call.GetArgs()...)
+ case *exprpb.Expr_ComprehensionExpr:
+ compre := e.GetComprehensionExpr()
+ exprs = append(exprs,
+ compre.GetIterRange(),
+ compre.GetAccuInit(),
+ compre.GetLoopCondition(),
+ compre.GetLoopStep(),
+ compre.GetResult())
+ case *exprpb.Expr_ListExpr:
+ list := e.GetListExpr()
+ exprs = append(exprs, list.GetElements()...)
+ case *exprpb.Expr_StructExpr:
+ for _, entry := range e.GetStructExpr().GetEntries() {
+ visitor.visitEntry(entry)
+ if entry.GetMapKey() != nil {
+ exprs = append(exprs, entry.GetMapKey())
+ }
+ exprs = append(exprs, entry.GetValue())
+ }
}
- p.nextExprID++
}
}
diff --git a/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/vendor/github.com/google/cel-go/interpreter/runtimecost.go
index 06b6b27ef1..80e7f61344 100644
--- a/vendor/github.com/google/cel-go/interpreter/runtimecost.go
+++ b/vendor/github.com/google/cel-go/interpreter/runtimecost.go
@@ -36,7 +36,7 @@ type ActualCostEstimator interface {
// CostObserver provides an observer that tracks runtime cost.
func CostObserver(tracker *CostTracker) EvalObserver {
- observer := func(id int64, programStep interface{}, val ref.Val) {
+ observer := func(id int64, programStep any, val ref.Val) {
switch t := programStep.(type) {
case ConstantQualifier:
// TODO: Push identifiers on to the stack before observing constant qualifiers that apply to them
@@ -53,6 +53,11 @@ func CostObserver(tracker *CostTracker) EvalObserver {
tracker.stack.drop(t.Attr().ID())
tracker.cost += common.SelectAndIdentCost
}
+ if !tracker.presenceTestHasCost {
+ if _, isTestOnly := programStep.(*evalTestOnly); isTestOnly {
+ tracker.cost -= common.SelectAndIdentCost
+ }
+ }
case *evalExhaustiveConditional:
// Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
tracker.stack.drop(t.attr.falsy.ID(), t.attr.truthy.ID(), t.attr.expr.ID())
@@ -95,21 +100,58 @@ func CostObserver(tracker *CostTracker) EvalObserver {
return observer
}
-// CostTracker represents the information needed for tacking runtime cost
+// CostTrackerOption configures the behavior of CostTracker objects.
+type CostTrackerOption func(*CostTracker) error
+
+// CostTrackerLimit sets the runtime limit on the evaluation cost during execution and will terminate the expression
+// evaluation if the limit is exceeded.
+func CostTrackerLimit(limit uint64) CostTrackerOption {
+ return func(tracker *CostTracker) error {
+ tracker.Limit = &limit
+ return nil
+ }
+}
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+// Defaults to presence tests having a cost of one.
+func PresenceTestHasCost(hasCost bool) CostTrackerOption {
+ return func(tracker *CostTracker) error {
+ tracker.presenceTestHasCost = hasCost
+ return nil
+ }
+}
+
+// NewCostTracker creates a new CostTracker with a given estimator and a set of functional CostTrackerOption values.
+func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (*CostTracker, error) {
+ tracker := &CostTracker{
+ Estimator: estimator,
+ presenceTestHasCost: true,
+ }
+ for _, opt := range opts {
+ err := opt(tracker)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return tracker, nil
+}
+
+// CostTracker represents the information needed for tracking runtime cost.
type CostTracker struct {
- Estimator ActualCostEstimator
- Limit *uint64
+ Estimator ActualCostEstimator
+ Limit *uint64
+ presenceTestHasCost bool
cost uint64
stack refValStack
}
// ActualCost returns the runtime cost
-func (c CostTracker) ActualCost() uint64 {
+func (c *CostTracker) ActualCost() uint64 {
return c.cost
}
-func (c CostTracker) costCall(call InterpretableCall, argValues []ref.Val, result ref.Val) uint64 {
+func (c *CostTracker) costCall(call InterpretableCall, argValues []ref.Val, result ref.Val) uint64 {
var cost uint64
if c.Estimator != nil {
callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), argValues, result)
@@ -122,7 +164,7 @@ func (c CostTracker) costCall(call InterpretableCall, argValues []ref.Val, resul
// if user has their own implementation of ActualCostEstimator, make sure to cover the mapping between overloadId and cost calculation
switch call.OverloadID() {
// O(n) functions
- case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString:
+ case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString:
cost += uint64(math.Ceil(float64(c.actualSize(argValues[0])) * common.StringTraversalCostFactor))
case overloads.InList:
// If a list is composed entirely of constant values this is O(1), but we don't account for that here.
@@ -179,7 +221,7 @@ func (c CostTracker) costCall(call InterpretableCall, argValues []ref.Val, resul
}
// actualSize returns the size of value
-func (c CostTracker) actualSize(value ref.Val) uint64 {
+func (c *CostTracker) actualSize(value ref.Val) uint64 {
if sz, ok := value.(traits.Sizer); ok {
return uint64(sz.Size().(types.Int))
}
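Editor's note: the runtimecost.go hunk above introduces a functional-options constructor for CostTracker. The following is a minimal sketch of how a caller might wire it up, assuming the upstream import path github.com/google/cel-go/interpreter; the limit value of 100 is chosen purely for illustration.

package main

import (
	"log"

	"github.com/google/cel-go/interpreter" // assumed upstream import path
)

func main() {
	// Configure a tracker with a hard evaluation-cost limit and presence
	// tests costed at zero; both options are added by the hunk above.
	tracker, err := interpreter.NewCostTracker(
		nil, // no custom ActualCostEstimator; built-in per-overload costs apply
		interpreter.CostTrackerLimit(100),      // illustrative limit, not from the patch
		interpreter.PresenceTestHasCost(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	// CostObserver(tracker) yields an EvalObserver that can be attached when
	// planning a program; ActualCost reports the accumulated runtime cost.
	_ = interpreter.CostObserver(tracker)
	log.Printf("accumulated cost: %d", tracker.ActualCost())
}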
diff --git a/vendor/github.com/google/cel-go/parser/BUILD.bazel b/vendor/github.com/google/cel-go/parser/BUILD.bazel
index b76e6e4844..67ecc95543 100644
--- a/vendor/github.com/google/cel-go/parser/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/parser/BUILD.bazel
@@ -23,8 +23,8 @@ go_library(
"//common/operators:go_default_library",
"//common/runes:go_default_library",
"//parser/gen:go_default_library",
- "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
- "@org_golang_google_genproto//googleapis/api/expr/v1alpha1:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
+ "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_google_protobuf//types/known/structpb:go_default_library",
],
@@ -34,6 +34,7 @@ go_test(
name = "go_default_test",
size = "small",
srcs = [
+ "helper_test.go",
"parser_test.go",
"unescape_test.go",
"unparser_test.go",
@@ -45,7 +46,8 @@ go_test(
"//common/debug:go_default_library",
"//parser/gen:go_default_library",
"//test:go_default_library",
- "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
+ "@org_golang_google_protobuf//testing/protocmp:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
index 22711310ce..654d1de7aa 100644
--- a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
+++ b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
@@ -21,6 +21,6 @@ go_library(
],
importpath = "github.com/google/cel-go/parser/gen",
deps = [
- "@com_github_antlr_antlr4_runtime_go_antlr//:go_default_library",
+ "@com_github_antlr_antlr4_runtime_go_antlr_v4//:go_default_library",
],
)
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/vendor/github.com/google/cel-go/parser/gen/CEL.g4
index 11145ec374..b011da803c 100644
--- a/vendor/github.com/google/cel-go/parser/gen/CEL.g4
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.g4
@@ -52,16 +52,18 @@ unary
member
: primary # PrimaryExpr
- | member op='.' id=IDENTIFIER (open='(' args=exprList? ')')? # SelectOrCall
- | member op='[' index=expr ']' # Index
- | member op='{' entries=fieldInitializerList? ','? '}' # CreateMessage
+ | member op='.' (opt='?')? id=IDENTIFIER # Select
+ | member op='.' id=IDENTIFIER open='(' args=exprList? ')' # MemberCall
+ | member op='[' (opt='?')? index=expr ']' # Index
;
primary
: leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall
| '(' e=expr ')' # Nested
- | op='[' elems=exprList? ','? ']' # CreateList
+ | op='[' elems=listInit? ','? ']' # CreateList
| op='{' entries=mapInitializerList? ','? '}' # CreateStruct
+ | leadingDot='.'? ids+=IDENTIFIER (ops+='.' ids+=IDENTIFIER)*
+ op='{' entries=fieldInitializerList? ','? '}' # CreateMessage
| literal # ConstantLiteral
;
@@ -69,23 +71,35 @@ exprList
: e+=expr (',' e+=expr)*
;
+listInit
+ : elems+=optExpr (',' elems+=optExpr)*
+ ;
+
fieldInitializerList
- : fields+=IDENTIFIER cols+=':' values+=expr (',' fields+=IDENTIFIER cols+=':' values+=expr)*
+ : fields+=optField cols+=':' values+=expr (',' fields+=optField cols+=':' values+=expr)*
+ ;
+
+optField
+ : (opt='?')? IDENTIFIER
;
mapInitializerList
- : keys+=expr cols+=':' values+=expr (',' keys+=expr cols+=':' values+=expr)*
+ : keys+=optExpr cols+=':' values+=expr (',' keys+=optExpr cols+=':' values+=expr)*
+ ;
+
+optExpr
+ : (opt='?')? e=expr
;
literal
: sign=MINUS? tok=NUM_INT # Int
- | tok=NUM_UINT # Uint
+ | tok=NUM_UINT # Uint
| sign=MINUS? tok=NUM_FLOAT # Double
- | tok=STRING # String
- | tok=BYTES # Bytes
- | tok=CEL_TRUE # BoolTrue
- | tok=CEL_FALSE # BoolFalse
- | tok=NUL # Null
+ | tok=STRING # String
+ | tok=BYTES # Bytes
+ | tok=CEL_TRUE # BoolTrue
+ | tok=CEL_FALSE # BoolFalse
+ | tok=NUL # Null
;
// Lexer Rules
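Editor's note: the CEL.g4 changes above add optional-entry syntax through the new optField and optExpr productions. The Go constants below sketch example CEL source the revised grammar is intended to accept; the concrete '?' spellings are inferred from the productions rather than taken from the patch, and may additionally require cel-go's optional-types feature to be enabled.

package main

// Illustrative CEL source text for the revised productions above
// (spellings inferred from the grammar; hypothetical identifiers).
const (
	optSelect   = `msg.?field`         // member: '.' with opt='?'
	optIndex    = `m[?key]`            // member: '[' with opt='?'
	optListElem = `[?maybe, 1, 2]`     // listInit with optExpr elements
	optMsgField = `Msg{?field: maybe}` // fieldInitializerList with optField
	optMapKey   = `{?key: value}`      // mapInitializerList with optExpr keys
)

func main() {}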
diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.interp b/vendor/github.com/google/cel-go/parser/gen/CEL.interp
index 13e3a10d17..75b8bb3e20 100644
--- a/vendor/github.com/google/cel-go/parser/gen/CEL.interp
+++ b/vendor/github.com/google/cel-go/parser/gen/CEL.interp
@@ -87,10 +87,13 @@ unary
member
primary
exprList
+listInit
fieldInitializerList
+optField
mapInitializerList
+optExpr
literal
atn:
-[4, 1, 36, 209, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 41, 8, 2, 10, 2, 12, 2, 44, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 49, 8, 3, 10, 3, 12, 3, 52, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 60, 8, 4, 10, 4, 12, 4, 63, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 74, 8, 5, 10, 5, 12, 5, 77, 9, 5, 1, 6, 1, 6, 4, 6, 81, 8, 6, 11, 6, 12, 6, 82, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 3, 6, 92, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 102, 8, 7, 1, 7, 3, 7, 105, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 115, 8, 7, 1, 7, 3, 7, 118, 8, 7, 1, 7, 5, 7, 121, 8, 7, 10, 7, 12, 7, 124, 9, 7, 1, 8, 3, 8, 127, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 132, 8, 8, 1, 8, 3, 8, 135, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 143, 8, 8, 1, 8, 3, 8, 146, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 151, 8, 8, 1, 8, 3, 8, 154, 8, 8, 1, 8, 1, 8, 3, 8, 158, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 163, 8, 9, 10, 9, 12, 9, 166, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5, 10, 175, 8, 10, 10, 10, 12, 10, 178, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 188, 8, 11, 10, 11, 12, 11, 191, 9, 11, 1, 12, 3, 12, 194, 8, 12, 1, 12, 1, 12, 1, 12, 3, 12, 199, 8, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 3, 12, 207, 8, 12, 1, 12, 0, 3, 8, 10, 14, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 235, 0, 26, 1, 0, 0, 0, 2, 29, 1, 0, 0, 0, 4, 37, 1, 0, 0, 0, 6, 45, 1, 0, 0, 0, 8, 53, 1, 0, 0, 0, 10, 64, 1, 0, 0, 0, 12, 91, 1, 0, 0, 0, 14, 93, 1, 0, 0, 0, 16, 157, 1, 0, 0, 0, 18, 159, 1, 0, 0, 0, 20, 167, 1, 0, 0, 0, 22, 179, 1, 0, 0, 0, 24, 206, 1, 0, 0, 0, 26, 27, 3, 2, 1, 0, 27, 28, 5, 0, 0, 1, 28, 1, 1, 0, 0, 0, 29, 35, 3, 4, 2, 0, 30, 31, 5, 20, 0, 0, 31, 32, 3, 4, 2, 0, 32, 33, 5, 21, 0, 0, 33, 34, 3, 2, 1, 0, 34, 36, 1, 0, 0, 0, 35, 30, 1, 0, 0, 0, 35, 36, 1, 0, 0, 0, 36, 3, 1, 0, 0, 0, 37, 42, 3, 6, 3, 0, 38, 39, 5, 9, 0, 0, 39, 41, 3, 6, 3, 0, 40, 38, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0, 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 5, 1, 0, 0, 0, 44, 42, 1, 0, 0, 0, 45, 50, 3, 8, 4, 0, 46, 47, 5, 8, 0, 0, 47, 49, 3, 8, 4, 0, 48, 46, 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0, 51, 7, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 54, 6, 4, -1, 0, 54, 55, 3, 10, 5, 0, 55, 61, 1, 0, 0, 0, 56, 57, 10, 1, 0, 0, 57, 58, 7, 0, 0, 0, 58, 60, 3, 8, 4, 2, 59, 56, 1, 0, 0, 0, 60, 63, 1, 0, 0, 0, 61, 59, 1, 0, 0, 0, 61, 62, 1, 0, 0, 0, 62, 9, 1, 0, 0, 0, 63, 61, 1, 0, 0, 0, 64, 65, 6, 5, -1, 0, 65, 66, 3, 12, 6, 0, 66, 75, 1, 0, 0, 0, 67, 68, 10, 2, 0, 0, 68, 69, 7, 1, 0, 0, 69, 74, 3, 10, 5, 3, 70, 71, 10, 1, 0, 0, 71, 72, 7, 2, 0, 0, 72, 74, 3, 10, 5, 2, 73, 67, 1, 0, 0, 0, 73, 70, 1, 0, 0, 0, 74, 77, 1, 0, 0, 0, 75, 73, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 11, 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 78, 92, 3, 14, 7, 0, 79, 81, 5, 19, 0, 0, 80, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83, 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 92, 3, 14, 7, 0, 85, 87, 5, 18, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 92, 3, 14, 7, 0, 91, 78, 1, 0, 0, 0, 91, 80, 1, 0, 0, 0, 91, 86, 1, 0, 0, 0, 92, 13, 1, 0, 0, 0, 93, 94, 6, 7, -1, 0, 94, 95, 3, 16, 8, 0, 95, 122, 1, 0, 0, 0, 96, 97, 10, 3, 0, 0, 97, 98, 5, 16, 0, 0, 
98, 104, 5, 36, 0, 0, 99, 101, 5, 14, 0, 0, 100, 102, 3, 18, 9, 0, 101, 100, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 103, 1, 0, 0, 0, 103, 105, 5, 15, 0, 0, 104, 99, 1, 0, 0, 0, 104, 105, 1, 0, 0, 0, 105, 121, 1, 0, 0, 0, 106, 107, 10, 2, 0, 0, 107, 108, 5, 10, 0, 0, 108, 109, 3, 2, 1, 0, 109, 110, 5, 11, 0, 0, 110, 121, 1, 0, 0, 0, 111, 112, 10, 1, 0, 0, 112, 114, 5, 12, 0, 0, 113, 115, 3, 20, 10, 0, 114, 113, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 5, 17, 0, 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119, 121, 5, 13, 0, 0, 120, 96, 1, 0, 0, 0, 120, 106, 1, 0, 0, 0, 120, 111, 1, 0, 0, 0, 121, 124, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 122, 123, 1, 0, 0, 0, 123, 15, 1, 0, 0, 0, 124, 122, 1, 0, 0, 0, 125, 127, 5, 16, 0, 0, 126, 125, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128, 134, 5, 36, 0, 0, 129, 131, 5, 14, 0, 0, 130, 132, 3, 18, 9, 0, 131, 130, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 135, 5, 15, 0, 0, 134, 129, 1, 0, 0, 0, 134, 135, 1, 0, 0, 0, 135, 158, 1, 0, 0, 0, 136, 137, 5, 14, 0, 0, 137, 138, 3, 2, 1, 0, 138, 139, 5, 15, 0, 0, 139, 158, 1, 0, 0, 0, 140, 142, 5, 10, 0, 0, 141, 143, 3, 18, 9, 0, 142, 141, 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 145, 1, 0, 0, 0, 144, 146, 5, 17, 0, 0, 145, 144, 1, 0, 0, 0, 145, 146, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 158, 5, 11, 0, 0, 148, 150, 5, 12, 0, 0, 149, 151, 3, 22, 11, 0, 150, 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 153, 1, 0, 0, 0, 152, 154, 5, 17, 0, 0, 153, 152, 1, 0, 0, 0, 153, 154, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 158, 5, 13, 0, 0, 156, 158, 3, 24, 12, 0, 157, 126, 1, 0, 0, 0, 157, 136, 1, 0, 0, 0, 157, 140, 1, 0, 0, 0, 157, 148, 1, 0, 0, 0, 157, 156, 1, 0, 0, 0, 158, 17, 1, 0, 0, 0, 159, 164, 3, 2, 1, 0, 160, 161, 5, 17, 0, 0, 161, 163, 3, 2, 1, 0, 162, 160, 1, 0, 0, 0, 163, 166, 1, 0, 0, 0, 164, 162, 1, 0, 0, 0, 164, 165, 1, 0, 0, 0, 165, 19, 1, 0, 0, 0, 166, 164, 1, 0, 0, 0, 167, 168, 5, 36, 0, 0, 168, 169, 5, 21, 0, 0, 169, 176, 3, 2, 1, 0, 170, 171, 5, 17, 0, 0, 171, 172, 5, 36, 0, 0, 172, 173, 5, 21, 0, 0, 173, 175, 3, 2, 1, 0, 174, 170, 1, 0, 0, 0, 175, 178, 1, 0, 0, 0, 176, 174, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 21, 1, 0, 0, 0, 178, 176, 1, 0, 0, 0, 179, 180, 3, 2, 1, 0, 180, 181, 5, 21, 0, 0, 181, 189, 3, 2, 1, 0, 182, 183, 5, 17, 0, 0, 183, 184, 3, 2, 1, 0, 184, 185, 5, 21, 0, 0, 185, 186, 3, 2, 1, 0, 186, 188, 1, 0, 0, 0, 187, 182, 1, 0, 0, 0, 188, 191, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 189, 190, 1, 0, 0, 0, 190, 23, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 192, 194, 5, 18, 0, 0, 193, 192, 1, 0, 0, 0, 193, 194, 1, 0, 0, 0, 194, 195, 1, 0, 0, 0, 195, 207, 5, 32, 0, 0, 196, 207, 5, 33, 0, 0, 197, 199, 5, 18, 0, 0, 198, 197, 1, 0, 0, 0, 198, 199, 1, 0, 0, 0, 199, 200, 1, 0, 0, 0, 200, 207, 5, 31, 0, 0, 201, 207, 5, 34, 0, 0, 202, 207, 5, 35, 0, 0, 203, 207, 5, 26, 0, 0, 204, 207, 5, 27, 0, 0, 205, 207, 5, 28, 0, 0, 206, 193, 1, 0, 0, 0, 206, 196, 1, 0, 0, 0, 206, 198, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 206, 202, 1, 0, 0, 0, 206, 203, 1, 0, 0, 0, 206, 204, 1, 0, 0, 0, 206, 205, 1, 0, 0, 0, 207, 25, 1, 0, 0, 0, 29, 35, 42, 50, 61, 73, 75, 82, 88, 91, 101, 104, 114, 117, 120, 122, 126, 131, 134, 142, 145, 150, 153, 157, 164, 176, 189, 193, 198, 206]
\ No newline at end of file
+[4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 
0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 
0, 219, 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, 240, 248]
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
index 969a598618..0247f470a7 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
@@ -1,7 +1,7 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
// BaseCELListener is a complete listener for a parse tree produced by CELParser.
type BaseCELListener struct{}
@@ -74,11 +74,17 @@ func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {}
// ExitNegate is called when production Negate is exited.
func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {}
-// EnterSelectOrCall is called when production SelectOrCall is entered.
-func (s *BaseCELListener) EnterSelectOrCall(ctx *SelectOrCallContext) {}
+// EnterMemberCall is called when production MemberCall is entered.
+func (s *BaseCELListener) EnterMemberCall(ctx *MemberCallContext) {}
-// ExitSelectOrCall is called when production SelectOrCall is exited.
-func (s *BaseCELListener) ExitSelectOrCall(ctx *SelectOrCallContext) {}
+// ExitMemberCall is called when production MemberCall is exited.
+func (s *BaseCELListener) ExitMemberCall(ctx *MemberCallContext) {}
+
+// EnterSelect is called when production Select is entered.
+func (s *BaseCELListener) EnterSelect(ctx *SelectContext) {}
+
+// ExitSelect is called when production Select is exited.
+func (s *BaseCELListener) ExitSelect(ctx *SelectContext) {}
// EnterPrimaryExpr is called when production PrimaryExpr is entered.
func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {}
@@ -92,12 +98,6 @@ func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {}
// ExitIndex is called when production Index is exited.
func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {}
-// EnterCreateMessage is called when production CreateMessage is entered.
-func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {}
-
-// ExitCreateMessage is called when production CreateMessage is exited.
-func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {}
-
// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered.
func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {}
@@ -122,6 +122,12 @@ func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {}
// ExitCreateStruct is called when production CreateStruct is exited.
func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {}
+// EnterCreateMessage is called when production CreateMessage is entered.
+func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {}
+
+// ExitCreateMessage is called when production CreateMessage is exited.
+func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {}
+
// EnterConstantLiteral is called when production ConstantLiteral is entered.
func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {}
@@ -134,18 +140,36 @@ func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {}
// ExitExprList is called when production exprList is exited.
func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {}
+// EnterListInit is called when production listInit is entered.
+func (s *BaseCELListener) EnterListInit(ctx *ListInitContext) {}
+
+// ExitListInit is called when production listInit is exited.
+func (s *BaseCELListener) ExitListInit(ctx *ListInitContext) {}
+
// EnterFieldInitializerList is called when production fieldInitializerList is entered.
func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {}
// ExitFieldInitializerList is called when production fieldInitializerList is exited.
func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {}
+// EnterOptField is called when production optField is entered.
+func (s *BaseCELListener) EnterOptField(ctx *OptFieldContext) {}
+
+// ExitOptField is called when production optField is exited.
+func (s *BaseCELListener) ExitOptField(ctx *OptFieldContext) {}
+
// EnterMapInitializerList is called when production mapInitializerList is entered.
func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {}
// ExitMapInitializerList is called when production mapInitializerList is exited.
func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {}
+// EnterOptExpr is called when production optExpr is entered.
+func (s *BaseCELListener) EnterOptExpr(ctx *OptExprContext) {}
+
+// ExitOptExpr is called when production optExpr is exited.
+func (s *BaseCELListener) ExitOptExpr(ctx *OptExprContext) {}
+
// EnterInt is called when production Int is entered.
func (s *BaseCELListener) EnterInt(ctx *IntContext) {}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
index 8e84579ed1..52a7f4dc57 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
@@ -1,7 +1,7 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
type BaseCELVisitor struct {
*antlr.BaseParseTreeVisitor
@@ -43,19 +43,19 @@ func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} {
return v.VisitChildren(ctx)
}
-func (v *BaseCELVisitor) VisitSelectOrCall(ctx *SelectOrCallContext) interface{} {
+func (v *BaseCELVisitor) VisitMemberCall(ctx *MemberCallContext) interface{} {
return v.VisitChildren(ctx)
}
-func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} {
+func (v *BaseCELVisitor) VisitSelect(ctx *SelectContext) interface{} {
return v.VisitChildren(ctx)
}
-func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
+func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} {
return v.VisitChildren(ctx)
}
-func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} {
+func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} {
return v.VisitChildren(ctx)
}
@@ -75,6 +75,10 @@ func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{}
return v.VisitChildren(ctx)
}
+func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} {
return v.VisitChildren(ctx)
}
@@ -83,14 +87,26 @@ func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} {
return v.VisitChildren(ctx)
}
+func (v *BaseCELVisitor) VisitListInit(ctx *ListInitContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} {
return v.VisitChildren(ctx)
}
+func (v *BaseCELVisitor) VisitOptField(ctx *OptFieldContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} {
return v.VisitChildren(ctx)
}
+func (v *BaseCELVisitor) VisitOptExpr(ctx *OptExprContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
func (v *BaseCELVisitor) VisitInt(ctx *IntContext) interface{} {
return v.VisitChildren(ctx)
}
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
index 7b4cca62e6..98ddc06d0b 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
@@ -1,4 +1,4 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen
@@ -7,7 +7,7 @@ import (
"sync"
"unicode"
- "github.com/antlr/antlr4/runtime/Go/antlr"
+ "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)
// Suppress unused import error
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
index 1b631b6e1b..73b7f1d39f 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go
@@ -1,7 +1,7 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
-import "github.com/antlr/antlr4/runtime/Go/antlr"
+import "github.com/antlr/antlr4/runtime/Go/antlr/v4"
// CELListener is a complete listener for a parse tree produced by CELParser.
type CELListener interface {
@@ -34,8 +34,11 @@ type CELListener interface {
// EnterNegate is called when entering the Negate production.
EnterNegate(c *NegateContext)
- // EnterSelectOrCall is called when entering the SelectOrCall production.
- EnterSelectOrCall(c *SelectOrCallContext)
+ // EnterMemberCall is called when entering the MemberCall production.
+ EnterMemberCall(c *MemberCallContext)
+
+ // EnterSelect is called when entering the Select production.
+ EnterSelect(c *SelectContext)
// EnterPrimaryExpr is called when entering the PrimaryExpr production.
EnterPrimaryExpr(c *PrimaryExprContext)
@@ -43,9 +46,6 @@ type CELListener interface {
// EnterIndex is called when entering the Index production.
EnterIndex(c *IndexContext)
- // EnterCreateMessage is called when entering the CreateMessage production.
- EnterCreateMessage(c *CreateMessageContext)
-
// EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production.
EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext)
@@ -58,18 +58,30 @@ type CELListener interface {
// EnterCreateStruct is called when entering the CreateStruct production.
EnterCreateStruct(c *CreateStructContext)
+ // EnterCreateMessage is called when entering the CreateMessage production.
+ EnterCreateMessage(c *CreateMessageContext)
+
// EnterConstantLiteral is called when entering the ConstantLiteral production.
EnterConstantLiteral(c *ConstantLiteralContext)
// EnterExprList is called when entering the exprList production.
EnterExprList(c *ExprListContext)
+ // EnterListInit is called when entering the listInit production.
+ EnterListInit(c *ListInitContext)
+
// EnterFieldInitializerList is called when entering the fieldInitializerList production.
EnterFieldInitializerList(c *FieldInitializerListContext)
+ // EnterOptField is called when entering the optField production.
+ EnterOptField(c *OptFieldContext)
+
// EnterMapInitializerList is called when entering the mapInitializerList production.
EnterMapInitializerList(c *MapInitializerListContext)
+ // EnterOptExpr is called when entering the optExpr production.
+ EnterOptExpr(c *OptExprContext)
+
// EnterInt is called when entering the Int production.
EnterInt(c *IntContext)
@@ -121,8 +133,11 @@ type CELListener interface {
// ExitNegate is called when exiting the Negate production.
ExitNegate(c *NegateContext)
- // ExitSelectOrCall is called when exiting the SelectOrCall production.
- ExitSelectOrCall(c *SelectOrCallContext)
+ // ExitMemberCall is called when exiting the MemberCall production.
+ ExitMemberCall(c *MemberCallContext)
+
+ // ExitSelect is called when exiting the Select production.
+ ExitSelect(c *SelectContext)
// ExitPrimaryExpr is called when exiting the PrimaryExpr production.
ExitPrimaryExpr(c *PrimaryExprContext)
@@ -130,9 +145,6 @@ type CELListener interface {
// ExitIndex is called when exiting the Index production.
ExitIndex(c *IndexContext)
- // ExitCreateMessage is called when exiting the CreateMessage production.
- ExitCreateMessage(c *CreateMessageContext)
-
// ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production.
ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext)
@@ -145,18 +157,30 @@ type CELListener interface {
// ExitCreateStruct is called when exiting the CreateStruct production.
ExitCreateStruct(c *CreateStructContext)
+ // ExitCreateMessage is called when exiting the CreateMessage production.
+ ExitCreateMessage(c *CreateMessageContext)
+
// ExitConstantLiteral is called when exiting the ConstantLiteral production.
ExitConstantLiteral(c *ConstantLiteralContext)
// ExitExprList is called when exiting the exprList production.
ExitExprList(c *ExprListContext)
+ // ExitListInit is called when exiting the listInit production.
+ ExitListInit(c *ListInitContext)
+
// ExitFieldInitializerList is called when exiting the fieldInitializerList production.
ExitFieldInitializerList(c *FieldInitializerListContext)
+ // ExitOptField is called when exiting the optField production.
+ ExitOptField(c *OptFieldContext)
+
// ExitMapInitializerList is called when exiting the mapInitializerList production.
ExitMapInitializerList(c *MapInitializerListContext)
+ // ExitOptExpr is called when exiting the optExpr production.
+ ExitOptExpr(c *OptExprContext)
+
// ExitInt is called when exiting the Int production.
ExitInt(c *IntContext)
diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
index afb3fe0d1c..0cb6c8eae8 100644
--- a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
+++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go
@@ -1,4 +1,4 @@
-// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.10.1. DO NOT EDIT.
+// Code generated from /Users/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.12.0. DO NOT EDIT.
package gen // CEL
import (
@@ -6,7 +6,7 @@ import (
"strconv"
"sync"
- "github.com/antlr/antlr4/runtime/Go/antlr"
+ "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)
// Suppress unused import errors
@@ -46,106 +46,125 @@ func celParserInit() {
}
staticData.ruleNames = []string{
"start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc",
- "unary", "member", "primary", "exprList", "fieldInitializerList", "mapInitializerList",
- "literal",
+ "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList",
+ "optField", "mapInitializerList", "optExpr", "literal",
}
staticData.predictionContextCache = antlr.NewPredictionContextCache()
staticData.serializedATN = []int32{
- 4, 1, 36, 209, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
+ 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7,
- 10, 2, 11, 7, 11, 2, 12, 7, 12, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 3, 1, 36, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 41, 8, 2, 10, 2,
- 12, 2, 44, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 49, 8, 3, 10, 3, 12, 3, 52, 9,
- 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 60, 8, 4, 10, 4, 12, 4, 63,
- 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 74, 8,
- 5, 10, 5, 12, 5, 77, 9, 5, 1, 6, 1, 6, 4, 6, 81, 8, 6, 11, 6, 12, 6, 82,
- 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 3, 6, 92, 8, 6, 1,
- 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 102, 8, 7, 1, 7, 3,
- 7, 105, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 115,
- 8, 7, 1, 7, 3, 7, 118, 8, 7, 1, 7, 5, 7, 121, 8, 7, 10, 7, 12, 7, 124,
- 9, 7, 1, 8, 3, 8, 127, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 132, 8, 8, 1, 8, 3,
- 8, 135, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 143, 8, 8, 1, 8,
- 3, 8, 146, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 151, 8, 8, 1, 8, 3, 8, 154, 8,
- 8, 1, 8, 1, 8, 3, 8, 158, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 163, 8, 9, 10,
- 9, 12, 9, 166, 9, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 5,
- 10, 175, 8, 10, 10, 10, 12, 10, 178, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11,
- 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 188, 8, 11, 10, 11, 12, 11, 191, 9,
- 11, 1, 12, 3, 12, 194, 8, 12, 1, 12, 1, 12, 1, 12, 3, 12, 199, 8, 12, 1,
- 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 3, 12, 207, 8, 12, 1, 12, 0, 3,
- 8, 10, 14, 13, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 0, 3, 1,
- 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 235, 0, 26, 1, 0, 0, 0, 2,
- 29, 1, 0, 0, 0, 4, 37, 1, 0, 0, 0, 6, 45, 1, 0, 0, 0, 8, 53, 1, 0, 0, 0,
- 10, 64, 1, 0, 0, 0, 12, 91, 1, 0, 0, 0, 14, 93, 1, 0, 0, 0, 16, 157, 1,
- 0, 0, 0, 18, 159, 1, 0, 0, 0, 20, 167, 1, 0, 0, 0, 22, 179, 1, 0, 0, 0,
- 24, 206, 1, 0, 0, 0, 26, 27, 3, 2, 1, 0, 27, 28, 5, 0, 0, 1, 28, 1, 1,
- 0, 0, 0, 29, 35, 3, 4, 2, 0, 30, 31, 5, 20, 0, 0, 31, 32, 3, 4, 2, 0, 32,
- 33, 5, 21, 0, 0, 33, 34, 3, 2, 1, 0, 34, 36, 1, 0, 0, 0, 35, 30, 1, 0,
- 0, 0, 35, 36, 1, 0, 0, 0, 36, 3, 1, 0, 0, 0, 37, 42, 3, 6, 3, 0, 38, 39,
- 5, 9, 0, 0, 39, 41, 3, 6, 3, 0, 40, 38, 1, 0, 0, 0, 41, 44, 1, 0, 0, 0,
- 42, 40, 1, 0, 0, 0, 42, 43, 1, 0, 0, 0, 43, 5, 1, 0, 0, 0, 44, 42, 1, 0,
- 0, 0, 45, 50, 3, 8, 4, 0, 46, 47, 5, 8, 0, 0, 47, 49, 3, 8, 4, 0, 48, 46,
- 1, 0, 0, 0, 49, 52, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 50, 51, 1, 0, 0, 0,
- 51, 7, 1, 0, 0, 0, 52, 50, 1, 0, 0, 0, 53, 54, 6, 4, -1, 0, 54, 55, 3,
- 10, 5, 0, 55, 61, 1, 0, 0, 0, 56, 57, 10, 1, 0, 0, 57, 58, 7, 0, 0, 0,
- 58, 60, 3, 8, 4, 2, 59, 56, 1, 0, 0, 0, 60, 63, 1, 0, 0, 0, 61, 59, 1,
- 0, 0, 0, 61, 62, 1, 0, 0, 0, 62, 9, 1, 0, 0, 0, 63, 61, 1, 0, 0, 0, 64,
- 65, 6, 5, -1, 0, 65, 66, 3, 12, 6, 0, 66, 75, 1, 0, 0, 0, 67, 68, 10, 2,
- 0, 0, 68, 69, 7, 1, 0, 0, 69, 74, 3, 10, 5, 3, 70, 71, 10, 1, 0, 0, 71,
- 72, 7, 2, 0, 0, 72, 74, 3, 10, 5, 2, 73, 67, 1, 0, 0, 0, 73, 70, 1, 0,
- 0, 0, 74, 77, 1, 0, 0, 0, 75, 73, 1, 0, 0, 0, 75, 76, 1, 0, 0, 0, 76, 11,
- 1, 0, 0, 0, 77, 75, 1, 0, 0, 0, 78, 92, 3, 14, 7, 0, 79, 81, 5, 19, 0,
- 0, 80, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83,
- 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 92, 3, 14, 7, 0, 85, 87, 5, 18, 0,
- 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89,
- 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 92, 3, 14, 7, 0, 91, 78, 1, 0, 0, 0,
- 91, 80, 1, 0, 0, 0, 91, 86, 1, 0, 0, 0, 92, 13, 1, 0, 0, 0, 93, 94, 6,
- 7, -1, 0, 94, 95, 3, 16, 8, 0, 95, 122, 1, 0, 0, 0, 96, 97, 10, 3, 0, 0,
- 97, 98, 5, 16, 0, 0, 98, 104, 5, 36, 0, 0, 99, 101, 5, 14, 0, 0, 100, 102,
- 3, 18, 9, 0, 101, 100, 1, 0, 0, 0, 101, 102, 1, 0, 0, 0, 102, 103, 1, 0,
- 0, 0, 103, 105, 5, 15, 0, 0, 104, 99, 1, 0, 0, 0, 104, 105, 1, 0, 0, 0,
- 105, 121, 1, 0, 0, 0, 106, 107, 10, 2, 0, 0, 107, 108, 5, 10, 0, 0, 108,
- 109, 3, 2, 1, 0, 109, 110, 5, 11, 0, 0, 110, 121, 1, 0, 0, 0, 111, 112,
- 10, 1, 0, 0, 112, 114, 5, 12, 0, 0, 113, 115, 3, 20, 10, 0, 114, 113, 1,
- 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 117, 1, 0, 0, 0, 116, 118, 5, 17, 0,
- 0, 117, 116, 1, 0, 0, 0, 117, 118, 1, 0, 0, 0, 118, 119, 1, 0, 0, 0, 119,
- 121, 5, 13, 0, 0, 120, 96, 1, 0, 0, 0, 120, 106, 1, 0, 0, 0, 120, 111,
- 1, 0, 0, 0, 121, 124, 1, 0, 0, 0, 122, 120, 1, 0, 0, 0, 122, 123, 1, 0,
- 0, 0, 123, 15, 1, 0, 0, 0, 124, 122, 1, 0, 0, 0, 125, 127, 5, 16, 0, 0,
- 126, 125, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 128, 1, 0, 0, 0, 128,
- 134, 5, 36, 0, 0, 129, 131, 5, 14, 0, 0, 130, 132, 3, 18, 9, 0, 131, 130,
- 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 135, 5, 15,
- 0, 0, 134, 129, 1, 0, 0, 0, 134, 135, 1, 0, 0, 0, 135, 158, 1, 0, 0, 0,
- 136, 137, 5, 14, 0, 0, 137, 138, 3, 2, 1, 0, 138, 139, 5, 15, 0, 0, 139,
- 158, 1, 0, 0, 0, 140, 142, 5, 10, 0, 0, 141, 143, 3, 18, 9, 0, 142, 141,
- 1, 0, 0, 0, 142, 143, 1, 0, 0, 0, 143, 145, 1, 0, 0, 0, 144, 146, 5, 17,
- 0, 0, 145, 144, 1, 0, 0, 0, 145, 146, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0,
- 147, 158, 5, 11, 0, 0, 148, 150, 5, 12, 0, 0, 149, 151, 3, 22, 11, 0, 150,
- 149, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 153, 1, 0, 0, 0, 152, 154,
- 5, 17, 0, 0, 153, 152, 1, 0, 0, 0, 153, 154, 1, 0, 0, 0, 154, 155, 1, 0,
- 0, 0, 155, 158, 5, 13, 0, 0, 156, 158, 3, 24, 12, 0, 157, 126, 1, 0, 0,
- 0, 157, 136, 1, 0, 0, 0, 157, 140, 1, 0, 0, 0, 157, 148, 1, 0, 0, 0, 157,
- 156, 1, 0, 0, 0, 158, 17, 1, 0, 0, 0, 159, 164, 3, 2, 1, 0, 160, 161, 5,
- 17, 0, 0, 161, 163, 3, 2, 1, 0, 162, 160, 1, 0, 0, 0, 163, 166, 1, 0, 0,
- 0, 164, 162, 1, 0, 0, 0, 164, 165, 1, 0, 0, 0, 165, 19, 1, 0, 0, 0, 166,
- 164, 1, 0, 0, 0, 167, 168, 5, 36, 0, 0, 168, 169, 5, 21, 0, 0, 169, 176,
- 3, 2, 1, 0, 170, 171, 5, 17, 0, 0, 171, 172, 5, 36, 0, 0, 172, 173, 5,
- 21, 0, 0, 173, 175, 3, 2, 1, 0, 174, 170, 1, 0, 0, 0, 175, 178, 1, 0, 0,
- 0, 176, 174, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 21, 1, 0, 0, 0, 178,
- 176, 1, 0, 0, 0, 179, 180, 3, 2, 1, 0, 180, 181, 5, 21, 0, 0, 181, 189,
- 3, 2, 1, 0, 182, 183, 5, 17, 0, 0, 183, 184, 3, 2, 1, 0, 184, 185, 5, 21,
- 0, 0, 185, 186, 3, 2, 1, 0, 186, 188, 1, 0, 0, 0, 187, 182, 1, 0, 0, 0,
- 188, 191, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 189, 190, 1, 0, 0, 0, 190,
- 23, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 192, 194, 5, 18, 0, 0, 193, 192,
- 1, 0, 0, 0, 193, 194, 1, 0, 0, 0, 194, 195, 1, 0, 0, 0, 195, 207, 5, 32,
- 0, 0, 196, 207, 5, 33, 0, 0, 197, 199, 5, 18, 0, 0, 198, 197, 1, 0, 0,
- 0, 198, 199, 1, 0, 0, 0, 199, 200, 1, 0, 0, 0, 200, 207, 5, 31, 0, 0, 201,
- 207, 5, 34, 0, 0, 202, 207, 5, 35, 0, 0, 203, 207, 5, 26, 0, 0, 204, 207,
- 5, 27, 0, 0, 205, 207, 5, 28, 0, 0, 206, 193, 1, 0, 0, 0, 206, 196, 1,
- 0, 0, 0, 206, 198, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 206, 202, 1, 0, 0,
- 0, 206, 203, 1, 0, 0, 0, 206, 204, 1, 0, 0, 0, 206, 205, 1, 0, 0, 0, 207,
- 25, 1, 0, 0, 0, 29, 35, 42, 50, 61, 73, 75, 82, 88, 91, 101, 104, 114,
- 117, 120, 122, 126, 131, 134, 142, 145, 150, 153, 157, 164, 176, 189, 193,
- 198, 206,
+ 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15,
+ 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1,
+ 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3,
+ 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1,
+ 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5,
+ 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1,
+ 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6,
+ 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3,
+ 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7,
+ 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10,
+ 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136,
+ 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8,
+ 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1,
+ 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8,
+ 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8,
+ 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186,
+ 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10,
+ 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1,
+ 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12,
+ 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1,
+ 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14,
+ 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15,
+ 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249,
+ 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22,
+ 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1,
+ 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14,
+ 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0,
+ 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28,
+ 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0,
+ 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38,
+ 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0,
+ 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6,
+ 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50,
+ 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0,
+ 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3,
+ 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56,
+ 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1,
+ 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64,
+ 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0,
+ 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0,
+ 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73,
+ 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1,
+ 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79,
+ 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0,
+ 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87,
+ 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0,
+ 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5,
+ 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94,
+ 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0,
+ 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100,
+ 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10,
+ 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0,
+ 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0,
+ 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111,
+ 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114,
+ 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10,
+ 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0,
+ 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0,
+ 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124,
+ 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124,
+ 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0,
+ 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0,
+ 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134,
+ 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137,
+ 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0,
+ 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0,
+ 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145,
+ 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149,
+ 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0,
+ 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0,
+ 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0,
+ 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157,
+ 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162,
+ 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0,
+ 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0,
+ 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168,
+ 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173,
+ 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1,
+ 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0,
+ 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179,
+ 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144,
+ 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0,
+ 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0,
+ 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187,
+ 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1,
+ 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28,
+ 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0,
+ 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199,
+ 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5,
+ 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2,
+ 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0,
+ 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208,
+ 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0,
+ 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0,
+ 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219,
+ 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223,
+ 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0,
+ 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0,
+ 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230,
+ 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1,
+ 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0,
+ 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0,
+ 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241,
+ 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249,
+ 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5,
+ 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0,
+ 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248,
+ 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48,
+ 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146,
+ 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235,
+ 240, 248,
}
deserializer := antlr.NewATNDeserializer(nil)
staticData.atn = deserializer.Deserialize(staticData.serializedATN)
@@ -234,9 +253,12 @@ const (
CELParserRULE_member = 7
CELParserRULE_primary = 8
CELParserRULE_exprList = 9
- CELParserRULE_fieldInitializerList = 10
- CELParserRULE_mapInitializerList = 11
- CELParserRULE_literal = 12
+ CELParserRULE_listInit = 10
+ CELParserRULE_fieldInitializerList = 11
+ CELParserRULE_optField = 12
+ CELParserRULE_mapInitializerList = 13
+ CELParserRULE_optExpr = 14
+ CELParserRULE_literal = 15
)
// IStartContext is an interface to support dynamic dispatch.
@@ -252,6 +274,10 @@ type IStartContext interface {
// SetE sets the e rule contexts.
SetE(IExprContext)
+ // Getter signatures
+ EOF() antlr.TerminalNode
+ Expr() IExprContext
+
// IsStartContext differentiates from other interfaces.
IsStartContext()
}
@@ -363,14 +389,14 @@ func (p *CELParser) Start() (localctx IStartContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(26)
+ p.SetState(32)
var _x = p.Expr()
localctx.(*StartContext).e = _x
}
{
- p.SetState(27)
+ p.SetState(33)
p.Match(CELParserEOF)
}
@@ -408,6 +434,13 @@ type IExprContext interface {
// SetE2 sets the e2 rule contexts.
SetE2(IExprContext)
+ // Getter signatures
+ AllConditionalOr() []IConditionalOrContext
+ ConditionalOr(i int) IConditionalOrContext
+ COLON() antlr.TerminalNode
+ QUESTIONMARK() antlr.TerminalNode
+ Expr() IExprContext
+
// IsExprContext differentiates from other interfaces.
IsExprContext()
}
@@ -580,37 +613,37 @@ func (p *CELParser) Expr() (localctx IExprContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(29)
+ p.SetState(35)
var _x = p.ConditionalOr()
localctx.(*ExprContext).e = _x
}
- p.SetState(35)
+ p.SetState(41)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
if _la == CELParserQUESTIONMARK {
{
- p.SetState(30)
+ p.SetState(36)
var _m = p.Match(CELParserQUESTIONMARK)
localctx.(*ExprContext).op = _m
}
{
- p.SetState(31)
+ p.SetState(37)
var _x = p.ConditionalOr()
localctx.(*ExprContext).e1 = _x
}
{
- p.SetState(32)
+ p.SetState(38)
p.Match(CELParserCOLON)
}
{
- p.SetState(33)
+ p.SetState(39)
var _x = p.Expr()
@@ -659,6 +692,12 @@ type IConditionalOrContext interface {
// SetE1 sets the e1 rule context list.
SetE1([]IConditionalAndContext)
+ // Getter signatures
+ AllConditionalAnd() []IConditionalAndContext
+ ConditionalAnd(i int) IConditionalAndContext
+ AllLOGICAL_OR() []antlr.TerminalNode
+ LOGICAL_OR(i int) antlr.TerminalNode
+
// IsConditionalOrContext differentiates from other interfaces.
IsConditionalOrContext()
}
@@ -820,19 +859,19 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(37)
+ p.SetState(43)
var _x = p.ConditionalAnd()
localctx.(*ConditionalOrContext).e = _x
}
- p.SetState(42)
+ p.SetState(48)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
for _la == CELParserLOGICAL_OR {
{
- p.SetState(38)
+ p.SetState(44)
var _m = p.Match(CELParserLOGICAL_OR)
@@ -840,7 +879,7 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
}
localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9)
{
- p.SetState(39)
+ p.SetState(45)
var _x = p.ConditionalAnd()
@@ -848,7 +887,7 @@ func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) {
}
localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd)
- p.SetState(44)
+ p.SetState(50)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
}
@@ -893,6 +932,12 @@ type IConditionalAndContext interface {
// SetE1 sets the e1 rule context list.
SetE1([]IRelationContext)
+ // Getter signatures
+ AllRelation() []IRelationContext
+ Relation(i int) IRelationContext
+ AllLOGICAL_AND() []antlr.TerminalNode
+ LOGICAL_AND(i int) antlr.TerminalNode
+
// IsConditionalAndContext differentiates from other interfaces.
IsConditionalAndContext()
}
@@ -1054,19 +1099,19 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(45)
+ p.SetState(51)
var _x = p.relation(0)
localctx.(*ConditionalAndContext).e = _x
}
- p.SetState(50)
+ p.SetState(56)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
for _la == CELParserLOGICAL_AND {
{
- p.SetState(46)
+ p.SetState(52)
var _m = p.Match(CELParserLOGICAL_AND)
@@ -1074,7 +1119,7 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
}
localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8)
{
- p.SetState(47)
+ p.SetState(53)
var _x = p.relation(0)
@@ -1082,7 +1127,7 @@ func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) {
}
localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation)
- p.SetState(52)
+ p.SetState(58)
p.GetErrorHandler().Sync(p)
_la = p.GetTokenStream().LA(1)
}
@@ -1103,6 +1148,18 @@ type IRelationContext interface {
// SetOp sets the op token.
SetOp(antlr.Token)
+ // Getter signatures
+ Calc() ICalcContext
+ AllRelation() []IRelationContext
+ Relation(i int) IRelationContext
+ LESS() antlr.TerminalNode
+ LESS_EQUALS() antlr.TerminalNode
+ GREATER_EQUALS() antlr.TerminalNode
+ GREATER() antlr.TerminalNode
+ EQUALS() antlr.TerminalNode
+ NOT_EQUALS() antlr.TerminalNode
+ IN() antlr.TerminalNode
+
// IsRelationContext differentiates from other interfaces.
IsRelationContext()
}
@@ -1291,12 +1348,12 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
p.EnterOuterAlt(localctx, 1)
{
- p.SetState(54)
+ p.SetState(60)
p.calc(0)
}
p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
- p.SetState(61)
+ p.SetState(67)
p.GetErrorHandler().Sync(p)
_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext())
@@ -1308,13 +1365,13 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
_prevctx = localctx
localctx = NewRelationContext(p, _parentctx, _parentState)
p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation)
- p.SetState(56)
+ p.SetState(62)
if !(p.Precpred(p.GetParserRuleContext(), 1)) {
panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
}
{
- p.SetState(57)
+ p.SetState(63)
var _lt = p.GetTokenStream().LT(1)
@@ -1322,7 +1379,7 @@ func (p *CELParser) relation(_p int) (localctx IRelationContext) {
_la = p.GetTokenStream().LA(1)
- if !(((_la)&-(0x1f+1)) == 0 && ((1<
-// <mapRange>.map(<iterVar>, <transform>)
-// <mapRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// <mapRange>.map(<iterVar>, <transform>)
+// <mapRange>.map(<iterVar>, <predicate>, <transform>)
+//
// In the second form only iterVar values which return true when provided to the predicate expression
// are transformed.
func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) {
diff --git a/vendor/github.com/google/cel-go/parser/options.go b/vendor/github.com/google/cel-go/parser/options.go
index b50686a912..674c697c5c 100644
--- a/vendor/github.com/google/cel-go/parser/options.go
+++ b/vendor/github.com/google/cel-go/parser/options.go
@@ -18,11 +18,13 @@ import "fmt"
type options struct {
maxRecursionDepth int
+ errorReportingLimit int
errorRecoveryTokenLookaheadLimit int
errorRecoveryLimit int
expressionSizeCodePointLimit int
macros map[string]Macro
populateMacroCalls bool
+ enableOptionalSyntax bool
}
// Option configures the behavior of the parser.
@@ -45,7 +47,7 @@ func MaxRecursionDepth(limit int) Option {
// successfully resume. In some pathological cases, the parser can look through quite a large set of input which
// in turn generates a lot of back-tracking and performance degredation.
//
-// The limit must be > 1, and is recommended to be less than the default of 256.
+// The limit must be >= 1, and is recommended to be less than the default of 256.
func ErrorRecoveryLookaheadTokenLimit(limit int) Option {
return func(opts *options) error {
if limit < 1 {
@@ -67,6 +69,19 @@ func ErrorRecoveryLimit(limit int) Option {
}
}
+// ErrorReportingLimit limits the number of syntax error reports before terminating parsing.
+//
+// The limit must be at least 1. If unset, the limit will be 100.
+func ErrorReportingLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < 1 {
+ return fmt.Errorf("error reporting limit must be at least 1: %d", limit)
+ }
+ opts.errorReportingLimit = limit
+ return nil
+ }
+}
+
// ExpressionSizeCodePointLimit is an option which limits the maximum code point count of an
// expression.
func ExpressionSizeCodePointLimit(expressionSizeCodePointLimit int) Option {
@@ -102,3 +117,11 @@ func PopulateMacroCalls(populateMacroCalls bool) Option {
return nil
}
}
+
+// EnableOptionalSyntax enables syntax for optional field and index selection.
+func EnableOptionalSyntax(optionalSyntax bool) Option {
+ return func(opts *options) error {
+ opts.enableOptionalSyntax = optionalSyntax
+ return nil
+ }
+}
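Note: the two options added above are ordinary functional options consumed by parser.NewParser (see the parser.go hunks below). A minimal usage sketch, assuming only the vendored API shown in this patch; the limit value and the helper name are illustrative:

package main

import "github.com/google/cel-go/parser"

// newLimitedParser builds a parser that stops reporting after 25 syntax errors
// and accepts the optional field/index selection syntax introduced in this bump.
func newLimitedParser() (*parser.Parser, error) {
	return parser.NewParser(
		parser.ErrorReportingLimit(25),    // defaults to 100 when unset
		parser.EnableOptionalSyntax(true), // accept `.?field` and `[?index]`
	)
}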
diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go
index 072f624574..e6f70f9060 100644
--- a/vendor/github.com/google/cel-go/parser/parser.go
+++ b/vendor/github.com/google/cel-go/parser/parser.go
@@ -18,11 +18,13 @@ package parser
import (
"fmt"
+ "regexp"
"strconv"
"strings"
"sync"
- "github.com/antlr/antlr4/runtime/Go/antlr"
+ antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
+
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/runes"
@@ -45,6 +47,9 @@ func NewParser(opts ...Option) (*Parser, error) {
return nil, err
}
}
+ if p.errorReportingLimit == 0 {
+ p.errorReportingLimit = 100
+ }
if p.maxRecursionDepth == 0 {
p.maxRecursionDepth = 250
}
@@ -89,9 +94,11 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors
helper: newParserHelper(source),
macros: p.macros,
maxRecursionDepth: p.maxRecursionDepth,
+ errorReportingLimit: p.errorReportingLimit,
errorRecoveryLimit: p.errorRecoveryLimit,
errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit,
populateMacroCalls: p.populateMacroCalls,
+ enableOptionalSyntax: p.enableOptionalSyntax,
}
buf, ok := source.(runes.Buffer)
if !ok {
@@ -178,7 +185,7 @@ func (rl *recursionListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
} else {
*depth++
}
- if *depth >= rl.maxDepth {
+ if *depth > rl.maxDepth {
panic(&recursionError{
message: fmt.Sprintf("expression recursion limit exceeded: %d", rl.maxDepth),
})
@@ -197,6 +204,16 @@ func (rl *recursionListener) ExitEveryRule(ctx antlr.ParserRuleContext) {
var _ antlr.ParseTreeListener = &recursionListener{}
+type tooManyErrors struct {
+ errorReportingLimit int
+}
+
+func (t *tooManyErrors) Error() string {
+ return fmt.Sprintf("More than %d syntax errors", t.errorReportingLimit)
+}
+
+var _ error = &tooManyErrors{}
+
type recoveryLimitError struct {
message string
}
@@ -271,17 +288,20 @@ type parser struct {
helper *parserHelper
macros map[string]Macro
recursionDepth int
+ errorReports int
maxRecursionDepth int
+ errorReportingLimit int
errorRecoveryLimit int
errorRecoveryLookaheadTokenLimit int
populateMacroCalls bool
+ enableOptionalSyntax bool
}
var (
_ gen.CELVisitor = (*parser)(nil)
lexerPool *sync.Pool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
l := gen.NewCELLexer(nil)
l.RemoveErrorListeners()
return l
@@ -289,7 +309,7 @@ var (
}
parserPool *sync.Pool = &sync.Pool{
- New: func() interface{} {
+ New: func() any {
p := gen.NewCELParser(nil)
p.RemoveErrorListeners()
return p
@@ -302,14 +322,14 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr {
lexer := lexerPool.Get().(*gen.CELLexer)
prsr := parserPool.Get().(*gen.CELParser)
- // Unfortunately ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners, so this is
- // good enough until that is exported.
prsrListener := &recursionListener{
maxDepth: p.maxRecursionDepth,
ruleTypeDepth: map[int]*int{},
}
defer func() {
+ // Unfortunately ANTLR Go runtime is missing (*antlr.BaseParser).RemoveParseListeners,
+ // so this is good enough until that is exported.
// Reset the lexer and parser before putting them back in the pool.
lexer.RemoveErrorListeners()
prsr.RemoveParseListener(prsrListener)
@@ -340,6 +360,8 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr {
p.errors.ReportError(common.NoLocation, err.Error())
case *recursionError:
p.errors.ReportError(common.NoLocation, err.Error())
+ case *tooManyErrors:
+ // do nothing
case *recoveryLimitError:
// do nothing, listeners already notified and error reported.
default:
@@ -352,57 +374,85 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr {
}
// Visitor implementations.
-func (p *parser) Visit(tree antlr.ParseTree) interface{} {
- p.recursionDepth++
- if p.recursionDepth > p.maxRecursionDepth {
- panic(&recursionError{message: "max recursion depth exceeded"})
- }
- defer func() {
- p.recursionDepth--
- }()
- switch tree.(type) {
+func (p *parser) Visit(tree antlr.ParseTree) any {
+ t := unnest(tree)
+ switch tree := t.(type) {
case *gen.StartContext:
- return p.VisitStart(tree.(*gen.StartContext))
+ return p.VisitStart(tree)
case *gen.ExprContext:
- return p.VisitExpr(tree.(*gen.ExprContext))
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitExpr(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.ConditionalAndContext:
- return p.VisitConditionalAnd(tree.(*gen.ConditionalAndContext))
+ return p.VisitConditionalAnd(tree)
case *gen.ConditionalOrContext:
- return p.VisitConditionalOr(tree.(*gen.ConditionalOrContext))
+ return p.VisitConditionalOr(tree)
case *gen.RelationContext:
- return p.VisitRelation(tree.(*gen.RelationContext))
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitRelation(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.CalcContext:
- return p.VisitCalc(tree.(*gen.CalcContext))
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitCalc(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.LogicalNotContext:
- return p.VisitLogicalNot(tree.(*gen.LogicalNotContext))
- case *gen.MemberExprContext:
- return p.VisitMemberExpr(tree.(*gen.MemberExprContext))
- case *gen.PrimaryExprContext:
- return p.VisitPrimaryExpr(tree.(*gen.PrimaryExprContext))
- case *gen.SelectOrCallContext:
- return p.VisitSelectOrCall(tree.(*gen.SelectOrCallContext))
+ return p.VisitLogicalNot(tree)
+ case *gen.IdentOrGlobalCallContext:
+ return p.VisitIdentOrGlobalCall(tree)
+ case *gen.SelectContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitSelect(tree)
+ p.decrementRecursionDepth()
+ return out
+ case *gen.MemberCallContext:
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitMemberCall(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.MapInitializerListContext:
- return p.VisitMapInitializerList(tree.(*gen.MapInitializerListContext))
+ return p.VisitMapInitializerList(tree)
case *gen.NegateContext:
- return p.VisitNegate(tree.(*gen.NegateContext))
+ return p.VisitNegate(tree)
case *gen.IndexContext:
- return p.VisitIndex(tree.(*gen.IndexContext))
+ p.checkAndIncrementRecursionDepth()
+ out := p.VisitIndex(tree)
+ p.decrementRecursionDepth()
+ return out
case *gen.UnaryContext:
- return p.VisitUnary(tree.(*gen.UnaryContext))
+ return p.VisitUnary(tree)
case *gen.CreateListContext:
- return p.VisitCreateList(tree.(*gen.CreateListContext))
+ return p.VisitCreateList(tree)
case *gen.CreateMessageContext:
- return p.VisitCreateMessage(tree.(*gen.CreateMessageContext))
+ return p.VisitCreateMessage(tree)
case *gen.CreateStructContext:
- return p.VisitCreateStruct(tree.(*gen.CreateStructContext))
+ return p.VisitCreateStruct(tree)
+ case *gen.IntContext:
+ return p.VisitInt(tree)
+ case *gen.UintContext:
+ return p.VisitUint(tree)
+ case *gen.DoubleContext:
+ return p.VisitDouble(tree)
+ case *gen.StringContext:
+ return p.VisitString(tree)
+ case *gen.BytesContext:
+ return p.VisitBytes(tree)
+ case *gen.BoolFalseContext:
+ return p.VisitBoolFalse(tree)
+ case *gen.BoolTrueContext:
+ return p.VisitBoolTrue(tree)
+ case *gen.NullContext:
+ return p.VisitNull(tree)
}
// Report at least one error if the parser reaches an unknown parse element.
// Typically, this happens if the parser has already encountered a syntax error elsewhere.
if len(p.errors.GetErrors()) == 0 {
txt := "<>"
- if tree != nil {
- txt = fmt.Sprintf("<<%T>>", tree)
+ if t != nil {
+ txt = fmt.Sprintf("<<%T>>", t)
}
return p.reportError(common.NoLocation, "unknown parse element encountered: %s", txt)
}
@@ -411,12 +461,12 @@ func (p *parser) Visit(tree antlr.ParseTree) interface{} {
}
// Visit a parse tree produced by CELParser#start.
-func (p *parser) VisitStart(ctx *gen.StartContext) interface{} {
+func (p *parser) VisitStart(ctx *gen.StartContext) any {
return p.Visit(ctx.Expr())
}
// Visit a parse tree produced by CELParser#expr.
-func (p *parser) VisitExpr(ctx *gen.ExprContext) interface{} {
+func (p *parser) VisitExpr(ctx *gen.ExprContext) any {
result := p.Visit(ctx.GetE()).(*exprpb.Expr)
if ctx.GetOp() == nil {
return result
@@ -428,11 +478,8 @@ func (p *parser) VisitExpr(ctx *gen.ExprContext) interface{} {
}
// Visit a parse tree produced by CELParser#conditionalOr.
-func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) interface{} {
+func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any {
result := p.Visit(ctx.GetE()).(*exprpb.Expr)
- if ctx.GetOps() == nil {
- return result
- }
b := newBalancer(p.helper, operators.LogicalOr, result)
rest := ctx.GetE1()
for i, op := range ctx.GetOps() {
@@ -447,11 +494,8 @@ func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) interface{} {
}
// Visit a parse tree produced by CELParser#conditionalAnd.
-func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) interface{} {
+func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any {
result := p.Visit(ctx.GetE()).(*exprpb.Expr)
- if ctx.GetOps() == nil {
- return result
- }
b := newBalancer(p.helper, operators.LogicalAnd, result)
rest := ctx.GetE1()
for i, op := range ctx.GetOps() {
@@ -466,10 +510,7 @@ func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) interface{}
}
// Visit a parse tree produced by CELParser#relation.
-func (p *parser) VisitRelation(ctx *gen.RelationContext) interface{} {
- if ctx.Calc() != nil {
- return p.Visit(ctx.Calc())
- }
+func (p *parser) VisitRelation(ctx *gen.RelationContext) any {
opText := ""
if ctx.GetOp() != nil {
opText = ctx.GetOp().GetText()
@@ -484,10 +525,7 @@ func (p *parser) VisitRelation(ctx *gen.RelationContext) interface{} {
}
// Visit a parse tree produced by CELParser#calc.
-func (p *parser) VisitCalc(ctx *gen.CalcContext) interface{} {
- if ctx.Unary() != nil {
- return p.Visit(ctx.Unary())
- }
+func (p *parser) VisitCalc(ctx *gen.CalcContext) any {
opText := ""
if ctx.GetOp() != nil {
opText = ctx.GetOp().GetText()
@@ -501,27 +539,12 @@ func (p *parser) VisitCalc(ctx *gen.CalcContext) interface{} {
return p.reportError(ctx, "operator not found")
}
-func (p *parser) VisitUnary(ctx *gen.UnaryContext) interface{} {
+func (p *parser) VisitUnary(ctx *gen.UnaryContext) any {
return p.helper.newLiteralString(ctx, "<<error>>")
}
-// Visit a parse tree produced by CELParser#MemberExpr.
-func (p *parser) VisitMemberExpr(ctx *gen.MemberExprContext) interface{} {
- switch ctx.Member().(type) {
- case *gen.PrimaryExprContext:
- return p.VisitPrimaryExpr(ctx.Member().(*gen.PrimaryExprContext))
- case *gen.SelectOrCallContext:
- return p.VisitSelectOrCall(ctx.Member().(*gen.SelectOrCallContext))
- case *gen.IndexContext:
- return p.VisitIndex(ctx.Member().(*gen.IndexContext))
- case *gen.CreateMessageContext:
- return p.VisitCreateMessage(ctx.Member().(*gen.CreateMessageContext))
- }
- return p.reportError(ctx, "unsupported simple expression")
-}
-
// Visit a parse tree produced by CELParser#LogicalNot.
-func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) interface{} {
+func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any {
if len(ctx.GetOps())%2 == 0 {
return p.Visit(ctx.Member())
}
@@ -530,7 +553,7 @@ func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) interface{} {
return p.globalCallOrMacro(opID, operators.LogicalNot, target)
}
-func (p *parser) VisitNegate(ctx *gen.NegateContext) interface{} {
+func (p *parser) VisitNegate(ctx *gen.NegateContext) any {
if len(ctx.GetOps())%2 == 0 {
return p.Visit(ctx.Member())
}
@@ -539,60 +562,77 @@ func (p *parser) VisitNegate(ctx *gen.NegateContext) interface{} {
return p.globalCallOrMacro(opID, operators.Negate, target)
}
-// Visit a parse tree produced by CELParser#SelectOrCall.
-func (p *parser) VisitSelectOrCall(ctx *gen.SelectOrCallContext) interface{} {
+// VisitSelect visits a parse tree produced by CELParser#Select.
+func (p *parser) VisitSelect(ctx *gen.SelectContext) any {
operand := p.Visit(ctx.Member()).(*exprpb.Expr)
// Handle the error case where no valid identifier is specified.
- if ctx.GetId() == nil {
+ if ctx.GetId() == nil || ctx.GetOp() == nil {
return p.helper.newExpr(ctx)
}
id := ctx.GetId().GetText()
- if ctx.GetOpen() != nil {
- opID := p.helper.id(ctx.GetOpen())
- return p.receiverCallOrMacro(opID, id, operand, p.visitList(ctx.GetArgs())...)
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '.?'")
+ }
+ return p.helper.newGlobalCall(
+ ctx.GetOp(),
+ operators.OptSelect,
+ operand,
+ p.helper.newLiteralString(ctx.GetId(), id))
}
return p.helper.newSelect(ctx.GetOp(), operand, id)
}
-// Visit a parse tree produced by CELParser#PrimaryExpr.
-func (p *parser) VisitPrimaryExpr(ctx *gen.PrimaryExprContext) interface{} {
- switch ctx.Primary().(type) {
- case *gen.NestedContext:
- return p.VisitNested(ctx.Primary().(*gen.NestedContext))
- case *gen.IdentOrGlobalCallContext:
- return p.VisitIdentOrGlobalCall(ctx.Primary().(*gen.IdentOrGlobalCallContext))
- case *gen.CreateListContext:
- return p.VisitCreateList(ctx.Primary().(*gen.CreateListContext))
- case *gen.CreateStructContext:
- return p.VisitCreateStruct(ctx.Primary().(*gen.CreateStructContext))
- case *gen.ConstantLiteralContext:
- return p.VisitConstantLiteral(ctx.Primary().(*gen.ConstantLiteralContext))
+// VisitMemberCall visits a parse tree produced by CELParser#MemberCall.
+func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any {
+ operand := p.Visit(ctx.Member()).(*exprpb.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
}
-
- return p.reportError(ctx, "invalid primary expression")
+ id := ctx.GetId().GetText()
+ opID := p.helper.id(ctx.GetOpen())
+ return p.receiverCallOrMacro(opID, id, operand, p.visitExprList(ctx.GetArgs())...)
}
// Visit a parse tree produced by CELParser#Index.
-func (p *parser) VisitIndex(ctx *gen.IndexContext) interface{} {
+func (p *parser) VisitIndex(ctx *gen.IndexContext) any {
target := p.Visit(ctx.Member()).(*exprpb.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetOp() == nil {
+ return p.helper.newExpr(ctx)
+ }
opID := p.helper.id(ctx.GetOp())
index := p.Visit(ctx.GetIndex()).(*exprpb.Expr)
- return p.globalCallOrMacro(opID, operators.Index, target, index)
+ operator := operators.Index
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '[?'")
+ }
+ operator = operators.OptIndex
+ }
+ return p.globalCallOrMacro(opID, operator, target, index)
}
// Visit a parse tree produced by CELParser#CreateMessage.
-func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) interface{} {
- target := p.Visit(ctx.Member()).(*exprpb.Expr)
- objID := p.helper.id(ctx.GetOp())
- if messageName, found := p.extractQualifiedName(target); found {
- entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
- return p.helper.newObject(objID, messageName, entries...)
+func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any {
+ messageName := ""
+ for _, id := range ctx.GetIds() {
+ if len(messageName) != 0 {
+ messageName += "."
+ }
+ messageName += id.GetText()
+ }
+ if ctx.GetLeadingDot() != nil {
+ messageName = "." + messageName
}
- return p.helper.newExpr(objID)
+ objID := p.helper.id(ctx.GetOp())
+ entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]*exprpb.Expr_CreateStruct_Entry)
+ return p.helper.newObject(objID, messageName, entries...)
}
// Visit a parse tree of field initializers.
-func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) interface{} {
+func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any {
if ctx == nil || ctx.GetFields() == nil {
// This is the result of a syntax error handled elswhere, return empty.
return []*exprpb.Expr_CreateStruct_Entry{}
@@ -607,15 +647,27 @@ func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext
return []*exprpb.Expr_CreateStruct_Entry{}
}
initID := p.helper.id(cols[i])
+ optField := f.(*gen.OptFieldContext)
+ optional := optField.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optField, "unsupported syntax '?'")
+ continue
+ }
+ // The field may be empty due to a prior error.
+ id := optField.IDENTIFIER()
+ if id == nil {
+ return []*exprpb.Expr_CreateStruct_Entry{}
+ }
+ fieldName := id.GetText()
value := p.Visit(vals[i]).(*exprpb.Expr)
- field := p.helper.newObjectField(initID, f.GetText(), value)
+ field := p.helper.newObjectField(initID, fieldName, value, optional)
result[i] = field
}
return result
}
// Visit a parse tree produced by CELParser#IdentOrGlobalCall.
-func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) interface{} {
+func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) any {
identName := ""
if ctx.GetLeadingDot() != nil {
identName = "."
@@ -632,24 +684,20 @@ func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) inter
identName += id
if ctx.GetOp() != nil {
opID := p.helper.id(ctx.GetOp())
- return p.globalCallOrMacro(opID, identName, p.visitList(ctx.GetArgs())...)
+ return p.globalCallOrMacro(opID, identName, p.visitExprList(ctx.GetArgs())...)
}
return p.helper.newIdent(ctx.GetId(), identName)
}
-// Visit a parse tree produced by CELParser#Nested.
-func (p *parser) VisitNested(ctx *gen.NestedContext) interface{} {
- return p.Visit(ctx.GetE())
-}
-
// Visit a parse tree produced by CELParser#CreateList.
-func (p *parser) VisitCreateList(ctx *gen.CreateListContext) interface{} {
+func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any {
listID := p.helper.id(ctx.GetOp())
- return p.helper.newList(listID, p.visitList(ctx.GetElems())...)
+ elems, optionals := p.visitListInit(ctx.GetElems())
+ return p.helper.newList(listID, elems, optionals...)
}
// Visit a parse tree produced by CELParser#CreateStruct.
-func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) interface{} {
+func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any {
structID := p.helper.id(ctx.GetOp())
entries := []*exprpb.Expr_CreateStruct_Entry{}
if ctx.GetEntries() != nil {
@@ -658,31 +706,8 @@ func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) interface{} {
return p.helper.newMap(structID, entries...)
}
-// Visit a parse tree produced by CELParser#ConstantLiteral.
-func (p *parser) VisitConstantLiteral(ctx *gen.ConstantLiteralContext) interface{} {
- switch ctx.Literal().(type) {
- case *gen.IntContext:
- return p.VisitInt(ctx.Literal().(*gen.IntContext))
- case *gen.UintContext:
- return p.VisitUint(ctx.Literal().(*gen.UintContext))
- case *gen.DoubleContext:
- return p.VisitDouble(ctx.Literal().(*gen.DoubleContext))
- case *gen.StringContext:
- return p.VisitString(ctx.Literal().(*gen.StringContext))
- case *gen.BytesContext:
- return p.VisitBytes(ctx.Literal().(*gen.BytesContext))
- case *gen.BoolFalseContext:
- return p.VisitBoolFalse(ctx.Literal().(*gen.BoolFalseContext))
- case *gen.BoolTrueContext:
- return p.VisitBoolTrue(ctx.Literal().(*gen.BoolTrueContext))
- case *gen.NullContext:
- return p.VisitNull(ctx.Literal().(*gen.NullContext))
- }
- return p.reportError(ctx, "invalid literal")
-}
-
// Visit a parse tree produced by CELParser#mapInitializerList.
-func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) interface{} {
+func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any {
if ctx == nil || ctx.GetKeys() == nil {
// This is the result of a syntax error handled elswhere, return empty.
return []*exprpb.Expr_CreateStruct_Entry{}
@@ -697,16 +722,22 @@ func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) int
// This is the result of a syntax error detected elsewhere.
return []*exprpb.Expr_CreateStruct_Entry{}
}
- key := p.Visit(keys[i]).(*exprpb.Expr)
+ optKey := keys[i]
+ optional := optKey.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optKey, "unsupported syntax '?'")
+ continue
+ }
+ key := p.Visit(optKey.GetE()).(*exprpb.Expr)
value := p.Visit(vals[i]).(*exprpb.Expr)
- entry := p.helper.newMapEntry(colID, key, value)
+ entry := p.helper.newMapEntry(colID, key, value, optional)
result[i] = entry
}
return result
}
// Visit a parse tree produced by CELParser#Int.
-func (p *parser) VisitInt(ctx *gen.IntContext) interface{} {
+func (p *parser) VisitInt(ctx *gen.IntContext) any {
text := ctx.GetTok().GetText()
base := 10
if strings.HasPrefix(text, "0x") {
@@ -724,7 +755,7 @@ func (p *parser) VisitInt(ctx *gen.IntContext) interface{} {
}
// Visit a parse tree produced by CELParser#Uint.
-func (p *parser) VisitUint(ctx *gen.UintContext) interface{} {
+func (p *parser) VisitUint(ctx *gen.UintContext) any {
text := ctx.GetTok().GetText()
// trim the 'u' designator included in the uint literal.
text = text[:len(text)-1]
@@ -741,7 +772,7 @@ func (p *parser) VisitUint(ctx *gen.UintContext) interface{} {
}
// Visit a parse tree produced by CELParser#Double.
-func (p *parser) VisitDouble(ctx *gen.DoubleContext) interface{} {
+func (p *parser) VisitDouble(ctx *gen.DoubleContext) any {
txt := ctx.GetTok().GetText()
if ctx.GetSign() != nil {
txt = ctx.GetSign().GetText() + txt
@@ -755,42 +786,66 @@ func (p *parser) VisitDouble(ctx *gen.DoubleContext) interface{} {
}
// Visit a parse tree produced by CELParser#String.
-func (p *parser) VisitString(ctx *gen.StringContext) interface{} {
+func (p *parser) VisitString(ctx *gen.StringContext) any {
s := p.unquote(ctx, ctx.GetText(), false)
return p.helper.newLiteralString(ctx, s)
}
// Visit a parse tree produced by CELParser#Bytes.
-func (p *parser) VisitBytes(ctx *gen.BytesContext) interface{} {
+func (p *parser) VisitBytes(ctx *gen.BytesContext) any {
b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:], true))
return p.helper.newLiteralBytes(ctx, b)
}
// Visit a parse tree produced by CELParser#BoolTrue.
-func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) interface{} {
+func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) any {
return p.helper.newLiteralBool(ctx, true)
}
// Visit a parse tree produced by CELParser#BoolFalse.
-func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) interface{} {
+func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any {
return p.helper.newLiteralBool(ctx, false)
}
// Visit a parse tree produced by CELParser#Null.
-func (p *parser) VisitNull(ctx *gen.NullContext) interface{} {
+func (p *parser) VisitNull(ctx *gen.NullContext) any {
return p.helper.newLiteral(ctx,
&exprpb.Constant{
ConstantKind: &exprpb.Constant_NullValue{
NullValue: structpb.NullValue_NULL_VALUE}})
}
-func (p *parser) visitList(ctx gen.IExprListContext) []*exprpb.Expr {
+func (p *parser) visitExprList(ctx gen.IExprListContext) []*exprpb.Expr {
if ctx == nil {
return []*exprpb.Expr{}
}
return p.visitSlice(ctx.GetE())
}
+func (p *parser) visitListInit(ctx gen.IListInitContext) ([]*exprpb.Expr, []int32) {
+ if ctx == nil {
+ return []*exprpb.Expr{}, []int32{}
+ }
+ elements := ctx.GetElems()
+ result := make([]*exprpb.Expr, len(elements))
+ optionals := []int32{}
+ for i, e := range elements {
+ ex := p.Visit(e.GetE()).(*exprpb.Expr)
+ if ex == nil {
+ return []*exprpb.Expr{}, []int32{}
+ }
+ result[i] = ex
+ if e.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ p.reportError(e.GetOpt(), "unsupported syntax '?'")
+ continue
+ }
+ optionals = append(optionals, int32(i))
+ }
+ }
+ return result, optionals
+}
+
func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr {
if expressions == nil {
return []*exprpb.Expr{}
@@ -803,26 +858,7 @@ func (p *parser) visitSlice(expressions []gen.IExprContext) []*exprpb.Expr {
return result
}
-func (p *parser) extractQualifiedName(e *exprpb.Expr) (string, bool) {
- if e == nil {
- return "", false
- }
- switch e.GetExprKind().(type) {
- case *exprpb.Expr_IdentExpr:
- return e.GetIdentExpr().GetName(), true
- case *exprpb.Expr_SelectExpr:
- s := e.GetSelectExpr()
- if prefix, found := p.extractQualifiedName(s.GetOperand()); found {
- return prefix + "." + s.GetField(), true
- }
- }
- // TODO: Add a method to Source to get location from character offset.
- location := p.helper.getLocation(e.GetId())
- p.reportError(location, "expected a qualified name")
- return "", false
-}
-
-func (p *parser) unquote(ctx interface{}, value string, isBytes bool) string {
+func (p *parser) unquote(ctx any, value string, isBytes bool) string {
text, err := unescape(value, isBytes)
if err != nil {
p.reportError(ctx, "%s", err.Error())
@@ -831,7 +867,7 @@ func (p *parser) unquote(ctx interface{}, value string, isBytes bool) string {
return text
}
-func (p *parser) reportError(ctx interface{}, format string, args ...interface{}) *exprpb.Expr {
+func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr {
var location common.Location
switch ctx.(type) {
case common.Location:
@@ -847,10 +883,24 @@ func (p *parser) reportError(ctx interface{}, format string, args ...interface{}
}
// ANTLR Parse listener implementations
-func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
- // TODO: Snippet
+func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) {
l := p.helper.source.NewLocation(line, column)
- p.errors.syntaxError(l, msg)
+ // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word
+ // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error
+ // messages out of ANTLR to prevent future breaking changes related to error message content.
+ if strings.Contains(msg, "no viable alternative") {
+ msg = reservedIdentifier.ReplaceAllString(msg, mismatchedReservedIdentifier)
+ }
+ // Ensure that no more than 100 syntax errors are reported as this will halt attempts to recover from a
+ // seriously broken expression.
+ if p.errorReports < p.errorReportingLimit {
+ p.errorReports++
+ p.errors.syntaxError(l, msg)
+ } else {
+ tme := &tooManyErrors{errorReportingLimit: p.errorReportingLimit}
+ p.errors.syntaxError(l, tme.Error())
+ panic(tme)
+ }
}
func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs antlr.ATNConfigSet) {
@@ -892,14 +942,95 @@ func (p *parser) expandMacro(exprID int64, function string, target *exprpb.Expr,
eh.parserHelper = p.helper
eh.id = exprID
expr, err := macro.Expander()(eh, target, args)
+ // An error indicates that the macro was matched, but the arguments were not well-formed.
if err != nil {
if err.Location != nil {
return p.reportError(err.Location, err.Message), true
}
return p.reportError(p.helper.getLocation(exprID), err.Message), true
}
+ // A nil value from the macro indicates that the macro implementation decided that
+ // an expansion should not be performed.
+ if expr == nil {
+ return nil, false
+ }
if p.populateMacroCalls {
p.helper.addMacroCall(expr.GetId(), function, target, args...)
}
return expr, true
}
+
+func (p *parser) checkAndIncrementRecursionDepth() {
+ p.recursionDepth++
+ if p.recursionDepth > p.maxRecursionDepth {
+ panic(&recursionError{message: "max recursion depth exceeded"})
+ }
+}
+
+func (p *parser) decrementRecursionDepth() {
+ p.recursionDepth--
+}
+
+// unnest traverses down the left-hand side of the parse graph until it encounters the first compound
+// parse node or the first leaf in the parse graph.
+func unnest(tree antlr.ParseTree) antlr.ParseTree {
+ for tree != nil {
+ switch t := tree.(type) {
+ case *gen.ExprContext:
+ // conditionalOr op='?' conditionalOr : expr
+ if t.GetOp() != nil {
+ return t
+ }
+ // conditionalOr
+ tree = t.GetE()
+ case *gen.ConditionalOrContext:
+ // conditionalAnd (ops=|| conditionalAnd)*
+ if t.GetOps() != nil && len(t.GetOps()) > 0 {
+ return t
+ }
+ // conditionalAnd
+ tree = t.GetE()
+ case *gen.ConditionalAndContext:
+ // relation (ops=&& relation)*
+ if t.GetOps() != nil && len(t.GetOps()) > 0 {
+ return t
+ }
+ // relation
+ tree = t.GetE()
+ case *gen.RelationContext:
+ // relation op relation
+ if t.GetOp() != nil {
+ return t
+ }
+ // calc
+ tree = t.Calc()
+ case *gen.CalcContext:
+ // calc op calc
+ if t.GetOp() != nil {
+ return t
+ }
+ // unary
+ tree = t.Unary()
+ case *gen.MemberExprContext:
+ // member expands to one of: primary, select, index, or create message
+ tree = t.Member()
+ case *gen.PrimaryExprContext:
+ // primary expands to one of identifier, nested, create list, create struct, literal
+ tree = t.Primary()
+ case *gen.NestedContext:
+ // contains a nested 'expr'
+ tree = t.GetE()
+ case *gen.ConstantLiteralContext:
+ // expands to a primitive literal
+ tree = t.Literal()
+ default:
+ return t
+ }
+ }
+ return tree
+}
+
+var (
+ reservedIdentifier = regexp.MustCompile("no viable alternative at input '.(true|false|null)'")
+ mismatchedReservedIdentifier = "mismatched input '$1' expecting IDENTIFIER"
+)
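Note: the SyntaxError handler above rewrites ANTLR's "no viable alternative" message when a reserved word is used as an identifier, using the reservedIdentifier regexp declared at the end of the file. A small self-contained sketch of that rewrite (the sample message is illustrative, not taken from this patch):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	reserved := regexp.MustCompile("no viable alternative at input '.(true|false|null)'")
	msg := "no viable alternative at input '.true'"
	// Prints: mismatched input 'true' expecting IDENTIFIER
	fmt.Println(reserved.ReplaceAllString(msg, "mismatched input '$1' expecting IDENTIFIER"))
}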
diff --git a/vendor/github.com/google/cel-go/parser/unparser.go b/vendor/github.com/google/cel-go/parser/unparser.go
index a459bb4a98..c3c40a0dd3 100644
--- a/vendor/github.com/google/cel-go/parser/unparser.go
+++ b/vendor/github.com/google/cel-go/parser/unparser.go
@@ -106,9 +106,15 @@ func (un *unparser) visitCall(expr *exprpb.Expr) error {
// ternary operator
case operators.Conditional:
return un.visitCallConditional(expr)
+ // optional select operator
+ case operators.OptSelect:
+ return un.visitOptSelect(expr)
// index operator
case operators.Index:
return un.visitCallIndex(expr)
+ // optional index operator
+ case operators.OptIndex:
+ return un.visitCallOptIndex(expr)
// unary operators
case operators.LogicalNot, operators.Negate:
return un.visitCallUnary(expr)
@@ -218,6 +224,14 @@ func (un *unparser) visitCallFunc(expr *exprpb.Expr) error {
}
func (un *unparser) visitCallIndex(expr *exprpb.Expr) error {
+ return un.visitCallIndexInternal(expr, "[")
+}
+
+func (un *unparser) visitCallOptIndex(expr *exprpb.Expr) error {
+ return un.visitCallIndexInternal(expr, "[?")
+}
+
+func (un *unparser) visitCallIndexInternal(expr *exprpb.Expr, op string) error {
c := expr.GetCallExpr()
args := c.GetArgs()
nested := isBinaryOrTernaryOperator(args[0])
@@ -225,7 +239,7 @@ func (un *unparser) visitCallIndex(expr *exprpb.Expr) error {
if err != nil {
return err
}
- un.str.WriteString("[")
+ un.str.WriteString(op)
err = un.visit(args[1])
if err != nil {
return err
@@ -262,6 +276,9 @@ func (un *unparser) visitConst(expr *exprpb.Expr) error {
// represent the float using the minimum required digits
d := strconv.FormatFloat(c.GetDoubleValue(), 'g', -1, 64)
un.str.WriteString(d)
+ if !strings.Contains(d, ".") {
+ un.str.WriteString(".0")
+ }
case *exprpb.Constant_Int64Value:
i := strconv.FormatInt(c.GetInt64Value(), 10)
un.str.WriteString(i)
@@ -289,8 +306,15 @@ func (un *unparser) visitIdent(expr *exprpb.Expr) error {
func (un *unparser) visitList(expr *exprpb.Expr) error {
l := expr.GetListExpr()
elems := l.GetElements()
+ optIndices := make(map[int]bool, len(elems))
+ for _, idx := range l.GetOptionalIndices() {
+ optIndices[int(idx)] = true
+ }
un.str.WriteString("[")
for i, elem := range elems {
+ if optIndices[i] {
+ un.str.WriteString("?")
+ }
err := un.visit(elem)
if err != nil {
return err
@@ -303,20 +327,32 @@ func (un *unparser) visitList(expr *exprpb.Expr) error {
return nil
}
+func (un *unparser) visitOptSelect(expr *exprpb.Expr) error {
+ c := expr.GetCallExpr()
+ args := c.GetArgs()
+ operand := args[0]
+ field := args[1].GetConstExpr().GetStringValue()
+ return un.visitSelectInternal(operand, false, ".?", field)
+}
+
func (un *unparser) visitSelect(expr *exprpb.Expr) error {
sel := expr.GetSelectExpr()
+ return un.visitSelectInternal(sel.GetOperand(), sel.GetTestOnly(), ".", sel.GetField())
+}
+
+func (un *unparser) visitSelectInternal(operand *exprpb.Expr, testOnly bool, op string, field string) error {
// handle the case when the select expression was generated by the has() macro.
- if sel.GetTestOnly() {
+ if testOnly {
un.str.WriteString("has(")
}
- nested := !sel.GetTestOnly() && isBinaryOrTernaryOperator(sel.GetOperand())
- err := un.visitMaybeNested(sel.GetOperand(), nested)
+ nested := !testOnly && isBinaryOrTernaryOperator(operand)
+ err := un.visitMaybeNested(operand, nested)
if err != nil {
return err
}
- un.str.WriteString(".")
- un.str.WriteString(sel.GetField())
- if sel.GetTestOnly() {
+ un.str.WriteString(op)
+ un.str.WriteString(field)
+ if testOnly {
un.str.WriteString(")")
}
return nil
@@ -339,6 +375,9 @@ func (un *unparser) visitStructMsg(expr *exprpb.Expr) error {
un.str.WriteString("{")
for i, entry := range entries {
f := entry.GetFieldKey()
+ if entry.GetOptionalEntry() {
+ un.str.WriteString("?")
+ }
un.str.WriteString(f)
un.str.WriteString(": ")
v := entry.GetValue()
@@ -360,6 +399,9 @@ func (un *unparser) visitStructMap(expr *exprpb.Expr) error {
un.str.WriteString("{")
for i, entry := range entries {
k := entry.GetMapKey()
+ if entry.GetOptionalEntry() {
+ un.str.WriteString("?")
+ }
err := un.visit(k)
if err != nil {
return err
@@ -492,11 +534,10 @@ func (un *unparser) writeOperatorWithWrapping(fun string, unmangled string) bool
un.str.WriteString(" ")
}
return true
- } else {
- un.str.WriteString(" ")
- un.str.WriteString(unmangled)
- un.str.WriteString(" ")
}
+ un.str.WriteString(" ")
+ un.str.WriteString(unmangled)
+ un.str.WriteString(" ")
return false
}
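Note: the double-literal change in visitConst above appends ".0" because strconv.FormatFloat with the 'g' verb drops the decimal point for whole-valued doubles, which would otherwise unparse them as integer literals. A minimal sketch of that behavior using only the standard library; the sample value is illustrative:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	d := strconv.FormatFloat(2.0, 'g', -1, 64) // yields "2", not "2.0"
	if !strings.Contains(d, ".") {
		d += ".0"
	}
	fmt.Println(d) // prints 2.0
}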
diff --git a/vendor/github.com/google/gnostic/LICENSE b/vendor/github.com/google/gnostic-models/LICENSE
similarity index 100%
rename from vendor/github.com/google/gnostic/LICENSE
rename to vendor/github.com/google/gnostic-models/LICENSE
diff --git a/vendor/github.com/google/gnostic/compiler/README.md b/vendor/github.com/google/gnostic-models/compiler/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/README.md
rename to vendor/github.com/google/gnostic-models/compiler/README.md
diff --git a/vendor/github.com/google/gnostic/compiler/context.go b/vendor/github.com/google/gnostic-models/compiler/context.go
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/context.go
rename to vendor/github.com/google/gnostic-models/compiler/context.go
diff --git a/vendor/github.com/google/gnostic/compiler/error.go b/vendor/github.com/google/gnostic-models/compiler/error.go
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/error.go
rename to vendor/github.com/google/gnostic-models/compiler/error.go
diff --git a/vendor/github.com/google/gnostic/compiler/extensions.go b/vendor/github.com/google/gnostic-models/compiler/extensions.go
similarity index 97%
rename from vendor/github.com/google/gnostic/compiler/extensions.go
rename to vendor/github.com/google/gnostic-models/compiler/extensions.go
index 5b5a916d2e..250c81e8c8 100644
--- a/vendor/github.com/google/gnostic/compiler/extensions.go
+++ b/vendor/github.com/google/gnostic-models/compiler/extensions.go
@@ -24,7 +24,7 @@ import (
"github.com/golang/protobuf/ptypes/any"
yaml "gopkg.in/yaml.v3"
- extensions "github.com/google/gnostic/extensions"
+ extensions "github.com/google/gnostic-models/extensions"
)
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
diff --git a/vendor/github.com/google/gnostic/compiler/helpers.go b/vendor/github.com/google/gnostic-models/compiler/helpers.go
similarity index 99%
rename from vendor/github.com/google/gnostic/compiler/helpers.go
rename to vendor/github.com/google/gnostic-models/compiler/helpers.go
index 97ffaa5131..975d65e8f8 100644
--- a/vendor/github.com/google/gnostic/compiler/helpers.go
+++ b/vendor/github.com/google/gnostic-models/compiler/helpers.go
@@ -22,7 +22,7 @@ import (
"gopkg.in/yaml.v3"
- "github.com/google/gnostic/jsonschema"
+ "github.com/google/gnostic-models/jsonschema"
)
// compiler helper functions, usually called from generated code
diff --git a/vendor/github.com/google/gnostic/compiler/main.go b/vendor/github.com/google/gnostic-models/compiler/main.go
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/main.go
rename to vendor/github.com/google/gnostic-models/compiler/main.go
diff --git a/vendor/github.com/google/gnostic/compiler/reader.go b/vendor/github.com/google/gnostic-models/compiler/reader.go
similarity index 100%
rename from vendor/github.com/google/gnostic/compiler/reader.go
rename to vendor/github.com/google/gnostic-models/compiler/reader.go
diff --git a/vendor/github.com/google/gnostic/extensions/README.md b/vendor/github.com/google/gnostic-models/extensions/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/extensions/README.md
rename to vendor/github.com/google/gnostic-models/extensions/README.md
diff --git a/vendor/github.com/google/gnostic/extensions/extension.pb.go b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
similarity index 99%
rename from vendor/github.com/google/gnostic/extensions/extension.pb.go
rename to vendor/github.com/google/gnostic-models/extensions/extension.pb.go
index a6a4ccca6c..a71df8abec 100644
--- a/vendor/github.com/google/gnostic/extensions/extension.pb.go
+++ b/vendor/github.com/google/gnostic-models/extensions/extension.pb.go
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.18.1
+// protoc-gen-go v1.27.1
+// protoc v3.19.3
// source: extensions/extension.proto
package gnostic_extension_v1
diff --git a/vendor/github.com/google/gnostic/extensions/extension.proto b/vendor/github.com/google/gnostic-models/extensions/extension.proto
similarity index 100%
rename from vendor/github.com/google/gnostic/extensions/extension.proto
rename to vendor/github.com/google/gnostic-models/extensions/extension.proto
diff --git a/vendor/github.com/google/gnostic/extensions/extensions.go b/vendor/github.com/google/gnostic-models/extensions/extensions.go
similarity index 100%
rename from vendor/github.com/google/gnostic/extensions/extensions.go
rename to vendor/github.com/google/gnostic-models/extensions/extensions.go
diff --git a/vendor/github.com/google/gnostic/jsonschema/README.md b/vendor/github.com/google/gnostic-models/jsonschema/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/README.md
rename to vendor/github.com/google/gnostic-models/jsonschema/README.md
diff --git a/vendor/github.com/google/gnostic/jsonschema/base.go b/vendor/github.com/google/gnostic-models/jsonschema/base.go
similarity index 90%
rename from vendor/github.com/google/gnostic/jsonschema/base.go
rename to vendor/github.com/google/gnostic-models/jsonschema/base.go
index 0af8b148b9..5fcc4885a0 100644
--- a/vendor/github.com/google/gnostic/jsonschema/base.go
+++ b/vendor/github.com/google/gnostic-models/jsonschema/base.go
@@ -1,3 +1,16 @@
+// Copyright 2017 Google LLC. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
// THIS FILE IS AUTOMATICALLY GENERATED.
@@ -81,4 +94,4 @@ YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
-c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}
\ No newline at end of file
+c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}
diff --git a/vendor/github.com/google/gnostic/jsonschema/display.go b/vendor/github.com/google/gnostic-models/jsonschema/display.go
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/display.go
rename to vendor/github.com/google/gnostic-models/jsonschema/display.go
diff --git a/vendor/github.com/google/gnostic/jsonschema/models.go b/vendor/github.com/google/gnostic-models/jsonschema/models.go
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/models.go
rename to vendor/github.com/google/gnostic-models/jsonschema/models.go
diff --git a/vendor/github.com/google/gnostic/jsonschema/operations.go b/vendor/github.com/google/gnostic-models/jsonschema/operations.go
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/operations.go
rename to vendor/github.com/google/gnostic-models/jsonschema/operations.go
diff --git a/vendor/github.com/google/gnostic/jsonschema/reader.go b/vendor/github.com/google/gnostic-models/jsonschema/reader.go
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/reader.go
rename to vendor/github.com/google/gnostic-models/jsonschema/reader.go
diff --git a/vendor/github.com/google/gnostic/jsonschema/schema.json b/vendor/github.com/google/gnostic-models/jsonschema/schema.json
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/schema.json
rename to vendor/github.com/google/gnostic-models/jsonschema/schema.json
diff --git a/vendor/github.com/google/gnostic/jsonschema/writer.go b/vendor/github.com/google/gnostic-models/jsonschema/writer.go
similarity index 100%
rename from vendor/github.com/google/gnostic/jsonschema/writer.go
rename to vendor/github.com/google/gnostic-models/jsonschema/writer.go
diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
similarity index 99%
rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go
rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
index 0f17907667..d71fe6d545 100644
--- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.go
@@ -23,7 +23,7 @@ import (
"gopkg.in/yaml.v3"
- "github.com/google/gnostic/compiler"
+ "github.com/google/gnostic-models/compiler"
)
// Version returns the package name (and OpenAPI version).
diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
similarity index 99%
rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go
rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
index 06b60157c1..65c4c913ce 100644
--- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.pb.go
@@ -16,8 +16,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.18.1
+// protoc-gen-go v1.27.1
+// protoc v3.19.3
// source: openapiv2/OpenAPIv2.proto
package openapi_v2
diff --git a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto
similarity index 100%
rename from vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto
rename to vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto
diff --git a/vendor/github.com/google/gnostic/openapiv2/README.md b/vendor/github.com/google/gnostic-models/openapiv2/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/openapiv2/README.md
rename to vendor/github.com/google/gnostic-models/openapiv2/README.md
diff --git a/vendor/github.com/google/gnostic/openapiv2/document.go b/vendor/github.com/google/gnostic-models/openapiv2/document.go
similarity index 96%
rename from vendor/github.com/google/gnostic/openapiv2/document.go
rename to vendor/github.com/google/gnostic-models/openapiv2/document.go
index 0021ae871a..e96ac0d6da 100644
--- a/vendor/github.com/google/gnostic/openapiv2/document.go
+++ b/vendor/github.com/google/gnostic-models/openapiv2/document.go
@@ -17,7 +17,7 @@ package openapi_v2
import (
	"gopkg.in/yaml.v3"
-	"github.com/google/gnostic/compiler"
+	"github.com/google/gnostic-models/compiler"
)
// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
diff --git a/vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json b/vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json
similarity index 100%
rename from vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json
rename to vendor/github.com/google/gnostic-models/openapiv2/openapi-2.0.json
diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go
similarity index 99%
rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go
rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go
index 5f4a7025ea..4b1131ce1c 100644
--- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go
+++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.go
@@ -23,7 +23,7 @@ import (
	"gopkg.in/yaml.v3"
-	"github.com/google/gnostic/compiler"
+	"github.com/google/gnostic-models/compiler"
)
// Version returns the package name (and OpenAPI version).
diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
similarity index 99%
rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go
rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
index 499e7f932d..945b8d11ff 100644
--- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go
+++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.pb.go
@@ -16,8 +16,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v3.18.1
+// protoc-gen-go v1.27.1
+// protoc v3.19.3
// source: openapiv3/OpenAPIv3.proto
package openapi_v3
diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto
similarity index 100%
rename from vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto
rename to vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto
diff --git a/vendor/github.com/google/gnostic/openapiv3/README.md b/vendor/github.com/google/gnostic-models/openapiv3/README.md
similarity index 100%
rename from vendor/github.com/google/gnostic/openapiv3/README.md
rename to vendor/github.com/google/gnostic-models/openapiv3/README.md
diff --git a/vendor/github.com/google/gnostic/openapiv3/document.go b/vendor/github.com/google/gnostic-models/openapiv3/document.go
similarity index 96%
rename from vendor/github.com/google/gnostic/openapiv3/document.go
rename to vendor/github.com/google/gnostic-models/openapiv3/document.go
index ef10d1d909..1cee467735 100644
--- a/vendor/github.com/google/gnostic/openapiv3/document.go
+++ b/vendor/github.com/google/gnostic-models/openapiv3/document.go
@@ -17,7 +17,7 @@ package openapi_v3
import (
	"gopkg.in/yaml.v3"
-	"github.com/google/gnostic/compiler"
+	"github.com/google/gnostic-models/compiler"
)
// ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation.
diff --git a/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json b/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json
deleted file mode 100644
index d5caed162d..0000000000
--- a/vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json
+++ /dev/null
@@ -1,1251 +0,0 @@
-{
- "title": "A JSON Schema for OpenAPI 3.0.",
- "id": "http://openapis.org/v3/schema.json#",
- "$schema": "http://json-schema.org/draft-04/schema#",
- "type": "object",
- "description": "This is the root document object of the OpenAPI document.",
- "required": [
- "openapi",
- "info",
- "paths"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "openapi": {
- "type": "string"
- },
- "info": {
- "$ref": "#/definitions/info"
- },
- "servers": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/server"
- },
- "uniqueItems": true
- },
- "paths": {
- "$ref": "#/definitions/paths"
- },
- "components": {
- "$ref": "#/definitions/components"
- },
- "security": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/securityRequirement"
- },
- "uniqueItems": true
- },
- "tags": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/tag"
- },
- "uniqueItems": true
- },
- "externalDocs": {
- "$ref": "#/definitions/externalDocs"
- }
- },
- "definitions": {
- "info": {
- "type": "object",
- "description": "The object provides metadata about the API. The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.",
- "required": [
- "title",
- "version"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "title": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "termsOfService": {
- "type": "string"
- },
- "contact": {
- "$ref": "#/definitions/contact"
- },
- "license": {
- "$ref": "#/definitions/license"
- },
- "version": {
- "type": "string"
- }
- }
- },
- "contact": {
- "type": "object",
- "description": "Contact information for the exposed API.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string",
- "format": "uri"
- },
- "email": {
- "type": "string",
- "format": "email"
- }
- }
- },
- "license": {
- "type": "object",
- "description": "License information for the exposed API.",
- "required": [
- "name"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- }
- }
- },
- "server": {
- "type": "object",
- "description": "An object representing a Server.",
- "required": [
- "url"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "url": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "variables": {
- "$ref": "#/definitions/serverVariables"
- }
- }
- },
- "serverVariable": {
- "type": "object",
- "description": "An object representing a Server Variable for server URL template substitution.",
- "required": [
- "default"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "enum": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "uniqueItems": true
- },
- "default": {
- "type": "string"
- },
- "description": {
- "type": "string"
- }
- }
- },
- "components": {
- "type": "object",
- "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "schemas": {
- "$ref": "#/definitions/schemasOrReferences"
- },
- "responses": {
- "$ref": "#/definitions/responsesOrReferences"
- },
- "parameters": {
- "$ref": "#/definitions/parametersOrReferences"
- },
- "examples": {
- "$ref": "#/definitions/examplesOrReferences"
- },
- "requestBodies": {
- "$ref": "#/definitions/requestBodiesOrReferences"
- },
- "headers": {
- "$ref": "#/definitions/headersOrReferences"
- },
- "securitySchemes": {
- "$ref": "#/definitions/securitySchemesOrReferences"
- },
- "links": {
- "$ref": "#/definitions/linksOrReferences"
- },
- "callbacks": {
- "$ref": "#/definitions/callbacksOrReferences"
- }
- }
- },
- "paths": {
- "type": "object",
- "description": "Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.",
- "additionalProperties": false,
- "patternProperties": {
- "^/": {
- "$ref": "#/definitions/pathItem"
- },
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- }
- },
- "pathItem": {
- "type": "object",
- "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "$ref": {
- "type": "string"
- },
- "summary": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "get": {
- "$ref": "#/definitions/operation"
- },
- "put": {
- "$ref": "#/definitions/operation"
- },
- "post": {
- "$ref": "#/definitions/operation"
- },
- "delete": {
- "$ref": "#/definitions/operation"
- },
- "options": {
- "$ref": "#/definitions/operation"
- },
- "head": {
- "$ref": "#/definitions/operation"
- },
- "patch": {
- "$ref": "#/definitions/operation"
- },
- "trace": {
- "$ref": "#/definitions/operation"
- },
- "servers": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/server"
- },
- "uniqueItems": true
- },
- "parameters": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/parameterOrReference"
- },
- "uniqueItems": true
- }
- }
- },
- "operation": {
- "type": "object",
- "description": "Describes a single API operation on a path.",
- "required": [
- "responses"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "tags": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "uniqueItems": true
- },
- "summary": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "externalDocs": {
- "$ref": "#/definitions/externalDocs"
- },
- "operationId": {
- "type": "string"
- },
- "parameters": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/parameterOrReference"
- },
- "uniqueItems": true
- },
- "requestBody": {
- "$ref": "#/definitions/requestBodyOrReference"
- },
- "responses": {
- "$ref": "#/definitions/responses"
- },
- "callbacks": {
- "$ref": "#/definitions/callbacksOrReferences"
- },
- "deprecated": {
- "type": "boolean"
- },
- "security": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/securityRequirement"
- },
- "uniqueItems": true
- },
- "servers": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/server"
- },
- "uniqueItems": true
- }
- }
- },
- "externalDocs": {
- "type": "object",
- "description": "Allows referencing an external resource for extended documentation.",
- "required": [
- "url"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "description": {
- "type": "string"
- },
- "url": {
- "type": "string"
- }
- }
- },
- "parameter": {
- "type": "object",
- "description": "Describes a single operation parameter. A unique parameter is defined by a combination of a name and location.",
- "required": [
- "name",
- "in"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "in": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "required": {
- "type": "boolean"
- },
- "deprecated": {
- "type": "boolean"
- },
- "allowEmptyValue": {
- "type": "boolean"
- },
- "style": {
- "type": "string"
- },
- "explode": {
- "type": "boolean"
- },
- "allowReserved": {
- "type": "boolean"
- },
- "schema": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "example": {
- "$ref": "#/definitions/any"
- },
- "examples": {
- "$ref": "#/definitions/examplesOrReferences"
- },
- "content": {
- "$ref": "#/definitions/mediaTypes"
- }
- }
- },
- "requestBody": {
- "type": "object",
- "description": "Describes a single request body.",
- "required": [
- "content"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "description": {
- "type": "string"
- },
- "content": {
- "$ref": "#/definitions/mediaTypes"
- },
- "required": {
- "type": "boolean"
- }
- }
- },
- "mediaType": {
- "type": "object",
- "description": "Each Media Type Object provides schema and examples for the media type identified by its key.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "schema": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "example": {
- "$ref": "#/definitions/any"
- },
- "examples": {
- "$ref": "#/definitions/examplesOrReferences"
- },
- "encoding": {
- "$ref": "#/definitions/encodings"
- }
- }
- },
- "encoding": {
- "type": "object",
- "description": "A single encoding definition applied to a single schema property.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "contentType": {
- "type": "string"
- },
- "headers": {
- "$ref": "#/definitions/headersOrReferences"
- },
- "style": {
- "type": "string"
- },
- "explode": {
- "type": "boolean"
- },
- "allowReserved": {
- "type": "boolean"
- }
- }
- },
- "responses": {
- "type": "object",
- "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.",
- "additionalProperties": false,
- "patternProperties": {
- "^([0-9X]{3})$": {
- "$ref": "#/definitions/responseOrReference"
- },
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "default": {
- "$ref": "#/definitions/responseOrReference"
- }
- }
- },
- "response": {
- "type": "object",
- "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.",
- "required": [
- "description"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "description": {
- "type": "string"
- },
- "headers": {
- "$ref": "#/definitions/headersOrReferences"
- },
- "content": {
- "$ref": "#/definitions/mediaTypes"
- },
- "links": {
- "$ref": "#/definitions/linksOrReferences"
- }
- }
- },
- "callback": {
- "type": "object",
- "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.",
- "additionalProperties": false,
- "patternProperties": {
- "^": {
- "$ref": "#/definitions/pathItem"
- },
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- }
- },
- "example": {
- "type": "object",
- "description": "",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "summary": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "value": {
- "$ref": "#/definitions/any"
- },
- "externalValue": {
- "type": "string"
- }
- }
- },
- "link": {
- "type": "object",
- "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "operationRef": {
- "type": "string"
- },
- "operationId": {
- "type": "string"
- },
- "parameters": {
- "$ref": "#/definitions/anysOrExpressions"
- },
- "requestBody": {
- "$ref": "#/definitions/anyOrExpression"
- },
- "description": {
- "type": "string"
- },
- "server": {
- "$ref": "#/definitions/server"
- }
- }
- },
- "header": {
- "type": "object",
- "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "description": {
- "type": "string"
- },
- "required": {
- "type": "boolean"
- },
- "deprecated": {
- "type": "boolean"
- },
- "allowEmptyValue": {
- "type": "boolean"
- },
- "style": {
- "type": "string"
- },
- "explode": {
- "type": "boolean"
- },
- "allowReserved": {
- "type": "boolean"
- },
- "schema": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "example": {
- "$ref": "#/definitions/any"
- },
- "examples": {
- "$ref": "#/definitions/examplesOrReferences"
- },
- "content": {
- "$ref": "#/definitions/mediaTypes"
- }
- }
- },
- "tag": {
- "type": "object",
- "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.",
- "required": [
- "name"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "externalDocs": {
- "$ref": "#/definitions/externalDocs"
- }
- }
- },
- "reference": {
- "type": "object",
- "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.",
- "required": [
- "$ref"
- ],
- "additionalProperties": false,
- "properties": {
- "$ref": {
- "type": "string"
- },
- "summary": {
- "type": "string"
- },
- "description": {
- "type": "string"
- }
- }
- },
- "schema": {
- "type": "object",
- "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "nullable": {
- "type": "boolean"
- },
- "discriminator": {
- "$ref": "#/definitions/discriminator"
- },
- "readOnly": {
- "type": "boolean"
- },
- "writeOnly": {
- "type": "boolean"
- },
- "xml": {
- "$ref": "#/definitions/xml"
- },
- "externalDocs": {
- "$ref": "#/definitions/externalDocs"
- },
- "example": {
- "$ref": "#/definitions/any"
- },
- "deprecated": {
- "type": "boolean"
- },
- "title": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
- },
- "multipleOf": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
- },
- "maximum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
- },
- "exclusiveMaximum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
- },
- "minimum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
- },
- "exclusiveMinimum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
- },
- "maxLength": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength"
- },
- "minLength": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength"
- },
- "pattern": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
- },
- "maxItems": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems"
- },
- "minItems": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems"
- },
- "uniqueItems": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
- },
- "maxProperties": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties"
- },
- "minProperties": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties"
- },
- "required": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/required"
- },
- "enum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
- },
- "type": {
- "type": "string"
- },
- "allOf": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "minItems": 1
- },
- "oneOf": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "minItems": 1
- },
- "anyOf": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "minItems": 1
- },
- "not": {
- "$ref": "#/definitions/schema"
- },
- "items": {
- "anyOf": [
- {
- "$ref": "#/definitions/schemaOrReference"
- },
- {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "minItems": 1
- }
- ]
- },
- "properties": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/schemaOrReference"
- }
- },
- "additionalProperties": {
- "oneOf": [
- {
- "$ref": "#/definitions/schemaOrReference"
- },
- {
- "type": "boolean"
- }
- ]
- },
- "default": {
- "$ref": "#/definitions/defaultType"
- },
- "description": {
- "type": "string"
- },
- "format": {
- "type": "string"
- }
- }
- },
- "discriminator": {
- "type": "object",
- "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.",
- "required": [
- "propertyName"
- ],
- "additionalProperties": false,
- "properties": {
- "propertyName": {
- "type": "string"
- },
- "mapping": {
- "$ref": "#/definitions/strings"
- }
- }
- },
- "xml": {
- "type": "object",
- "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "namespace": {
- "type": "string"
- },
- "prefix": {
- "type": "string"
- },
- "attribute": {
- "type": "boolean"
- },
- "wrapped": {
- "type": "boolean"
- }
- }
- },
- "securityScheme": {
- "type": "object",
- "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header or as a query parameter), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect Discovery.",
- "required": [
- "type"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "type": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "in": {
- "type": "string"
- },
- "scheme": {
- "type": "string"
- },
- "bearerFormat": {
- "type": "string"
- },
- "flows": {
- "$ref": "#/definitions/oauthFlows"
- },
- "openIdConnectUrl": {
- "type": "string"
- }
- }
- },
- "oauthFlows": {
- "type": "object",
- "description": "Allows configuration of the supported OAuth Flows.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "implicit": {
- "$ref": "#/definitions/oauthFlow"
- },
- "password": {
- "$ref": "#/definitions/oauthFlow"
- },
- "clientCredentials": {
- "$ref": "#/definitions/oauthFlow"
- },
- "authorizationCode": {
- "$ref": "#/definitions/oauthFlow"
- }
- }
- },
- "oauthFlow": {
- "type": "object",
- "description": "Configuration details for a supported OAuth Flow",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "authorizationUrl": {
- "type": "string"
- },
- "tokenUrl": {
- "type": "string"
- },
- "refreshUrl": {
- "type": "string"
- },
- "scopes": {
- "$ref": "#/definitions/strings"
- }
- }
- },
- "securityRequirement": {
- "type": "object",
- "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the Open API object or Operation Object, only one of Security Requirement Objects in the list needs to be satisfied to authorize the request.",
- "additionalProperties": false,
- "patternProperties": {
- "^[a-zA-Z0-9\\.\\-_]+$": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "uniqueItems": true
- }
- }
- },
- "anyOrExpression": {
- "oneOf": [
- {
- "$ref": "#/definitions/any"
- },
- {
- "$ref": "#/definitions/expression"
- }
- ]
- },
- "callbackOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/callback"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "exampleOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/example"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "headerOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/header"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "linkOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/link"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "parameterOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/parameter"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "requestBodyOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/requestBody"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "responseOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/response"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "schemaOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/schema"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "securitySchemeOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/securityScheme"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "anysOrExpressions": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/anyOrExpression"
- }
- },
- "callbacksOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/callbackOrReference"
- }
- },
- "encodings": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/encoding"
- }
- },
- "examplesOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/exampleOrReference"
- }
- },
- "headersOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/headerOrReference"
- }
- },
- "linksOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/linkOrReference"
- }
- },
- "mediaTypes": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/mediaType"
- }
- },
- "parametersOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/parameterOrReference"
- }
- },
- "requestBodiesOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/requestBodyOrReference"
- }
- },
- "responsesOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/responseOrReference"
- }
- },
- "schemasOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/schemaOrReference"
- }
- },
- "securitySchemesOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/securitySchemeOrReference"
- }
- },
- "serverVariables": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/serverVariable"
- }
- },
- "strings": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "object": {
- "type": "object",
- "additionalProperties": true
- },
- "any": {
- "additionalProperties": true
- },
- "expression": {
- "type": "object",
- "additionalProperties": true
- },
- "specificationExtension": {
- "description": "Any property starting with x- is valid.",
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "number"
- },
- {
- "type": "boolean"
- },
- {
- "type": "string"
- },
- {
- "type": "object"
- },
- {
- "type": "array"
- }
- ]
- },
- "defaultType": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- },
- {
- "type": "number"
- },
- {
- "type": "boolean"
- },
- {
- "type": "string"
- }
- ]
- }
- }
-}
diff --git a/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json b/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json
deleted file mode 100644
index ed0b83adf4..0000000000
--- a/vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json
+++ /dev/null
@@ -1,1250 +0,0 @@
-{
- "title": "A JSON Schema for OpenAPI 3.0.",
- "id": "http://openapis.org/v3/schema.json#",
- "$schema": "http://json-schema.org/draft-04/schema#",
- "type": "object",
- "description": "This is the root document object of the OpenAPI document.",
- "required": [
- "openapi",
- "info",
- "paths"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "openapi": {
- "type": "string"
- },
- "info": {
- "$ref": "#/definitions/info"
- },
- "servers": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/server"
- },
- "uniqueItems": true
- },
- "paths": {
- "$ref": "#/definitions/paths"
- },
- "components": {
- "$ref": "#/definitions/components"
- },
- "security": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/securityRequirement"
- },
- "uniqueItems": true
- },
- "tags": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/tag"
- },
- "uniqueItems": true
- },
- "externalDocs": {
- "$ref": "#/definitions/externalDocs"
- }
- },
- "definitions": {
- "info": {
- "type": "object",
- "description": "The object provides metadata about the API. The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.",
- "required": [
- "title",
- "version"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "title": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "termsOfService": {
- "type": "string"
- },
- "contact": {
- "$ref": "#/definitions/contact"
- },
- "license": {
- "$ref": "#/definitions/license"
- },
- "version": {
- "type": "string"
- },
- "summary": {
- "type": "string"
- }
- }
- },
- "contact": {
- "type": "object",
- "description": "Contact information for the exposed API.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string",
- "format": "uri"
- },
- "email": {
- "type": "string",
- "format": "email"
- }
- }
- },
- "license": {
- "type": "object",
- "description": "License information for the exposed API.",
- "required": [
- "name"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- }
- }
- },
- "server": {
- "type": "object",
- "description": "An object representing a Server.",
- "required": [
- "url"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "url": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "variables": {
- "$ref": "#/definitions/serverVariables"
- }
- }
- },
- "serverVariable": {
- "type": "object",
- "description": "An object representing a Server Variable for server URL template substitution.",
- "required": [
- "default"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "enum": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "uniqueItems": true
- },
- "default": {
- "type": "string"
- },
- "description": {
- "type": "string"
- }
- }
- },
- "components": {
- "type": "object",
- "description": "Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "schemas": {
- "$ref": "#/definitions/schemasOrReferences"
- },
- "responses": {
- "$ref": "#/definitions/responsesOrReferences"
- },
- "parameters": {
- "$ref": "#/definitions/parametersOrReferences"
- },
- "examples": {
- "$ref": "#/definitions/examplesOrReferences"
- },
- "requestBodies": {
- "$ref": "#/definitions/requestBodiesOrReferences"
- },
- "headers": {
- "$ref": "#/definitions/headersOrReferences"
- },
- "securitySchemes": {
- "$ref": "#/definitions/securitySchemesOrReferences"
- },
- "links": {
- "$ref": "#/definitions/linksOrReferences"
- },
- "callbacks": {
- "$ref": "#/definitions/callbacksOrReferences"
- }
- }
- },
- "paths": {
- "type": "object",
- "description": "Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.",
- "additionalProperties": false,
- "patternProperties": {
- "^/": {
- "$ref": "#/definitions/pathItem"
- },
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- }
- },
- "pathItem": {
- "type": "object",
- "description": "Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "$ref": {
- "type": "string"
- },
- "summary": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "get": {
- "$ref": "#/definitions/operation"
- },
- "put": {
- "$ref": "#/definitions/operation"
- },
- "post": {
- "$ref": "#/definitions/operation"
- },
- "delete": {
- "$ref": "#/definitions/operation"
- },
- "options": {
- "$ref": "#/definitions/operation"
- },
- "head": {
- "$ref": "#/definitions/operation"
- },
- "patch": {
- "$ref": "#/definitions/operation"
- },
- "trace": {
- "$ref": "#/definitions/operation"
- },
- "servers": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/server"
- },
- "uniqueItems": true
- },
- "parameters": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/parameterOrReference"
- },
- "uniqueItems": true
- }
- }
- },
- "operation": {
- "type": "object",
- "description": "Describes a single API operation on a path.",
- "required": [
- "responses"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "tags": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "uniqueItems": true
- },
- "summary": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "externalDocs": {
- "$ref": "#/definitions/externalDocs"
- },
- "operationId": {
- "type": "string"
- },
- "parameters": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/parameterOrReference"
- },
- "uniqueItems": true
- },
- "requestBody": {
- "$ref": "#/definitions/requestBodyOrReference"
- },
- "responses": {
- "$ref": "#/definitions/responses"
- },
- "callbacks": {
- "$ref": "#/definitions/callbacksOrReferences"
- },
- "deprecated": {
- "type": "boolean"
- },
- "security": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/securityRequirement"
- },
- "uniqueItems": true
- },
- "servers": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/server"
- },
- "uniqueItems": true
- }
- }
- },
- "externalDocs": {
- "type": "object",
- "description": "Allows referencing an external resource for extended documentation.",
- "required": [
- "url"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "description": {
- "type": "string"
- },
- "url": {
- "type": "string"
- }
- }
- },
- "parameter": {
- "type": "object",
- "description": "Describes a single operation parameter. A unique parameter is defined by a combination of a name and location.",
- "required": [
- "name",
- "in"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "in": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "required": {
- "type": "boolean"
- },
- "deprecated": {
- "type": "boolean"
- },
- "allowEmptyValue": {
- "type": "boolean"
- },
- "style": {
- "type": "string"
- },
- "explode": {
- "type": "boolean"
- },
- "allowReserved": {
- "type": "boolean"
- },
- "schema": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "example": {
- "$ref": "#/definitions/any"
- },
- "examples": {
- "$ref": "#/definitions/examplesOrReferences"
- },
- "content": {
- "$ref": "#/definitions/mediaTypes"
- }
- }
- },
- "requestBody": {
- "type": "object",
- "description": "Describes a single request body.",
- "required": [
- "content"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "description": {
- "type": "string"
- },
- "content": {
- "$ref": "#/definitions/mediaTypes"
- },
- "required": {
- "type": "boolean"
- }
- }
- },
- "mediaType": {
- "type": "object",
- "description": "Each Media Type Object provides schema and examples for the media type identified by its key.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "schema": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "example": {
- "$ref": "#/definitions/any"
- },
- "examples": {
- "$ref": "#/definitions/examplesOrReferences"
- },
- "encoding": {
- "$ref": "#/definitions/encodings"
- }
- }
- },
- "encoding": {
- "type": "object",
- "description": "A single encoding definition applied to a single schema property.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "contentType": {
- "type": "string"
- },
- "headers": {
- "$ref": "#/definitions/headersOrReferences"
- },
- "style": {
- "type": "string"
- },
- "explode": {
- "type": "boolean"
- },
- "allowReserved": {
- "type": "boolean"
- }
- }
- },
- "responses": {
- "type": "object",
- "description": "A container for the expected responses of an operation. The container maps a HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.",
- "additionalProperties": false,
- "patternProperties": {
- "^([0-9X]{3})$": {
- "$ref": "#/definitions/responseOrReference"
- },
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "default": {
- "$ref": "#/definitions/responseOrReference"
- }
- }
- },
- "response": {
- "type": "object",
- "description": "Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.",
- "required": [
- "description"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "description": {
- "type": "string"
- },
- "headers": {
- "$ref": "#/definitions/headersOrReferences"
- },
- "content": {
- "$ref": "#/definitions/mediaTypes"
- },
- "links": {
- "$ref": "#/definitions/linksOrReferences"
- }
- }
- },
- "callback": {
- "type": "object",
- "description": "A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.",
- "additionalProperties": false,
- "patternProperties": {
- "^": {
- "$ref": "#/definitions/pathItem"
- },
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- }
- },
- "example": {
- "type": "object",
- "description": "",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "summary": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "value": {
- "$ref": "#/definitions/any"
- },
- "externalValue": {
- "type": "string"
- }
- }
- },
- "link": {
- "type": "object",
- "description": "The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "operationRef": {
- "type": "string"
- },
- "operationId": {
- "type": "string"
- },
- "parameters": {
- "$ref": "#/definitions/anyOrExpression"
- },
- "requestBody": {
- "$ref": "#/definitions/anyOrExpression"
- },
- "description": {
- "type": "string"
- },
- "server": {
- "$ref": "#/definitions/server"
- }
- }
- },
- "header": {
- "type": "object",
- "description": "The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "description": {
- "type": "string"
- },
- "required": {
- "type": "boolean"
- },
- "deprecated": {
- "type": "boolean"
- },
- "allowEmptyValue": {
- "type": "boolean"
- },
- "style": {
- "type": "string"
- },
- "explode": {
- "type": "boolean"
- },
- "allowReserved": {
- "type": "boolean"
- },
- "schema": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "example": {
- "$ref": "#/definitions/any"
- },
- "examples": {
- "$ref": "#/definitions/examplesOrReferences"
- },
- "content": {
- "$ref": "#/definitions/mediaTypes"
- }
- }
- },
- "tag": {
- "type": "object",
- "description": "Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.",
- "required": [
- "name"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "externalDocs": {
- "$ref": "#/definitions/externalDocs"
- }
- }
- },
- "reference": {
- "type": "object",
- "description": "A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.",
- "required": [
- "$ref"
- ],
- "additionalProperties": false,
- "properties": {
- "$ref": {
- "type": "string"
- },
- "summary": {
- "type": "string"
- },
- "description": {
- "type": "string"
- }
- }
- },
- "schema": {
- "type": "object",
- "description": "The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "nullable": {
- "type": "boolean"
- },
- "discriminator": {
- "$ref": "#/definitions/discriminator"
- },
- "readOnly": {
- "type": "boolean"
- },
- "writeOnly": {
- "type": "boolean"
- },
- "xml": {
- "$ref": "#/definitions/xml"
- },
- "externalDocs": {
- "$ref": "#/definitions/externalDocs"
- },
- "example": {
- "$ref": "#/definitions/any"
- },
- "deprecated": {
- "type": "boolean"
- },
- "title": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
- },
- "multipleOf": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
- },
- "maximum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
- },
- "exclusiveMaximum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
- },
- "minimum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
- },
- "exclusiveMinimum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
- },
- "maxLength": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/maxLength"
- },
- "minLength": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/minLength"
- },
- "pattern": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
- },
- "maxItems": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/maxItems"
- },
- "minItems": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/minItems"
- },
- "uniqueItems": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
- },
- "maxProperties": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/maxProperties"
- },
- "minProperties": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/minProperties"
- },
- "required": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/required"
- },
- "enum": {
- "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
- },
- "type": {
- "type": "string"
- },
- "allOf": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "minItems": 1
- },
- "oneOf": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "minItems": 1
- },
- "anyOf": {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "minItems": 1
- },
- "not": {
- "$ref": "#/definitions/schema"
- },
- "items": {
- "anyOf": [
- {
- "$ref": "#/definitions/schemaOrReference"
- },
- {
- "type": "array",
- "items": {
- "$ref": "#/definitions/schemaOrReference"
- },
- "minItems": 1
- }
- ]
- },
- "properties": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/schemaOrReference"
- }
- },
- "additionalProperties": {
- "oneOf": [
- {
- "$ref": "#/definitions/schemaOrReference"
- },
- {
- "type": "boolean"
- }
- ]
- },
- "default": {
- "$ref": "#/definitions/defaultType"
- },
- "description": {
- "type": "string"
- },
- "format": {
- "type": "string"
- }
- }
- },
- "discriminator": {
- "type": "object",
- "description": "When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.",
- "required": [
- "propertyName"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "propertyName": {
- "type": "string"
- },
- "mapping": {
- "$ref": "#/definitions/strings"
- }
- }
- },
- "xml": {
- "type": "object",
- "description": "A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "namespace": {
- "type": "string"
- },
- "prefix": {
- "type": "string"
- },
- "attribute": {
- "type": "boolean"
- },
- "wrapped": {
- "type": "boolean"
- }
- }
- },
- "securityScheme": {
- "type": "object",
- "description": "Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that currently (2019) the implicit flow is about to be deprecated OAuth 2.0 Security Best Current Practice. Recommended for most use case is Authorization Code Grant flow with PKCE.",
- "required": [
- "type"
- ],
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "type": {
- "type": "string"
- },
- "description": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "in": {
- "type": "string"
- },
- "scheme": {
- "type": "string"
- },
- "bearerFormat": {
- "type": "string"
- },
- "flows": {
- "$ref": "#/definitions/oauthFlows"
- },
- "openIdConnectUrl": {
- "type": "string"
- }
- }
- },
- "oauthFlows": {
- "type": "object",
- "description": "Allows configuration of the supported OAuth Flows.",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "implicit": {
- "$ref": "#/definitions/oauthFlow"
- },
- "password": {
- "$ref": "#/definitions/oauthFlow"
- },
- "clientCredentials": {
- "$ref": "#/definitions/oauthFlow"
- },
- "authorizationCode": {
- "$ref": "#/definitions/oauthFlow"
- }
- }
- },
- "oauthFlow": {
- "type": "object",
- "description": "Configuration details for a supported OAuth Flow",
- "additionalProperties": false,
- "patternProperties": {
- "^x-": {
- "$ref": "#/definitions/specificationExtension"
- }
- },
- "properties": {
- "authorizationUrl": {
- "type": "string"
- },
- "tokenUrl": {
- "type": "string"
- },
- "refreshUrl": {
- "type": "string"
- },
- "scopes": {
- "$ref": "#/definitions/strings"
- }
- }
- },
- "securityRequirement": {
- "type": "object",
- "description": "Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.",
- "additionalProperties": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "uniqueItems": true
- }
- },
- "anyOrExpression": {
- "oneOf": [
- {
- "$ref": "#/definitions/any"
- },
- {
- "$ref": "#/definitions/expression"
- }
- ]
- },
- "callbackOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/callback"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "exampleOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/example"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "headerOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/header"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "linkOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/link"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "parameterOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/parameter"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "requestBodyOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/requestBody"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "responseOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/response"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "schemaOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/schema"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "securitySchemeOrReference": {
- "oneOf": [
- {
- "$ref": "#/definitions/securityScheme"
- },
- {
- "$ref": "#/definitions/reference"
- }
- ]
- },
- "callbacksOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/callbackOrReference"
- }
- },
- "encodings": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/encoding"
- }
- },
- "examplesOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/exampleOrReference"
- }
- },
- "headersOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/headerOrReference"
- }
- },
- "linksOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/linkOrReference"
- }
- },
- "mediaTypes": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/mediaType"
- }
- },
- "parametersOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/parameterOrReference"
- }
- },
- "requestBodiesOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/requestBodyOrReference"
- }
- },
- "responsesOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/responseOrReference"
- }
- },
- "schemasOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/schemaOrReference"
- }
- },
- "securitySchemesOrReferences": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/securitySchemeOrReference"
- }
- },
- "serverVariables": {
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/serverVariable"
- }
- },
- "strings": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "object": {
- "type": "object",
- "additionalProperties": true
- },
- "any": {
- "additionalProperties": true
- },
- "expression": {
- "type": "object",
- "additionalProperties": true
- },
- "specificationExtension": {
- "description": "Any property starting with x- is valid.",
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "number"
- },
- {
- "type": "boolean"
- },
- {
- "type": "string"
- },
- {
- "type": "object"
- },
- {
- "type": "array"
- }
- ]
- },
- "defaultType": {
- "oneOf": [
- {
- "type": "null"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- },
- {
- "type": "number"
- },
- {
- "type": "boolean"
- },
- {
- "type": "string"
- }
- ]
- }
- }
-}
diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go
index ab7f03ae26..182c926b90 100644
--- a/vendor/github.com/google/pprof/profile/encode.go
+++ b/vendor/github.com/google/pprof/profile/encode.go
@@ -17,6 +17,7 @@ package profile
import (
"errors"
"sort"
+ "strings"
)
func (p *Profile) decoder() []decoder {
@@ -183,12 +184,13 @@ var profileDecoder = []decoder{
// repeated Location location = 4
func(b *buffer, m message) error {
x := new(Location)
- x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
+ x.Line = b.tmpLines[:0] // Use shared space temporarily
pp := m.(*Profile)
pp.Location = append(pp.Location, x)
err := decodeMessage(b, x)
- var tmp []Line
- x.Line = append(tmp, x.Line...) // Shrink to allocated size
+ b.tmpLines = x.Line[:0]
+ // Copy to shrink size and detach from shared space.
+ x.Line = append([]Line(nil), x.Line...)
return err
},
// repeated Function function = 5
@@ -252,6 +254,14 @@ func (p *Profile) postDecode() error {
} else {
mappings[m.ID] = m
}
+
+ // If this is a main linux kernel mapping with a relocation symbol suffix
+ // ("[kernel.kallsyms]_text"), extract said suffix.
+ // It is fairly hacky to handle at this level, but the alternatives appear even worse.
+ const prefix = "[kernel.kallsyms]"
+ if strings.HasPrefix(m.File, prefix) {
+ m.KernelRelocationSymbol = m.File[len(prefix):]
+ }
}
functions := make(map[uint64]*Function, len(p.Function))
@@ -298,41 +308,52 @@ func (p *Profile) postDecode() error {
st.Unit, err = getString(p.stringTable, &st.unitX, err)
}
+ // Pre-allocate space for all locations.
+ numLocations := 0
+ for _, s := range p.Sample {
+ numLocations += len(s.locationIDX)
+ }
+ locBuffer := make([]*Location, numLocations)
+
for _, s := range p.Sample {
- labels := make(map[string][]string, len(s.labelX))
- numLabels := make(map[string][]int64, len(s.labelX))
- numUnits := make(map[string][]string, len(s.labelX))
- for _, l := range s.labelX {
- var key, value string
- key, err = getString(p.stringTable, &l.keyX, err)
- if l.strX != 0 {
- value, err = getString(p.stringTable, &l.strX, err)
- labels[key] = append(labels[key], value)
- } else if l.numX != 0 || l.unitX != 0 {
- numValues := numLabels[key]
- units := numUnits[key]
- if l.unitX != 0 {
- var unit string
- unit, err = getString(p.stringTable, &l.unitX, err)
- units = padStringArray(units, len(numValues))
- numUnits[key] = append(units, unit)
+ if len(s.labelX) > 0 {
+ labels := make(map[string][]string, len(s.labelX))
+ numLabels := make(map[string][]int64, len(s.labelX))
+ numUnits := make(map[string][]string, len(s.labelX))
+ for _, l := range s.labelX {
+ var key, value string
+ key, err = getString(p.stringTable, &l.keyX, err)
+ if l.strX != 0 {
+ value, err = getString(p.stringTable, &l.strX, err)
+ labels[key] = append(labels[key], value)
+ } else if l.numX != 0 || l.unitX != 0 {
+ numValues := numLabels[key]
+ units := numUnits[key]
+ if l.unitX != 0 {
+ var unit string
+ unit, err = getString(p.stringTable, &l.unitX, err)
+ units = padStringArray(units, len(numValues))
+ numUnits[key] = append(units, unit)
+ }
+ numLabels[key] = append(numLabels[key], l.numX)
}
- numLabels[key] = append(numLabels[key], l.numX)
}
- }
- if len(labels) > 0 {
- s.Label = labels
- }
- if len(numLabels) > 0 {
- s.NumLabel = numLabels
- for key, units := range numUnits {
- if len(units) > 0 {
- numUnits[key] = padStringArray(units, len(numLabels[key]))
+ if len(labels) > 0 {
+ s.Label = labels
+ }
+ if len(numLabels) > 0 {
+ s.NumLabel = numLabels
+ for key, units := range numUnits {
+ if len(units) > 0 {
+ numUnits[key] = padStringArray(units, len(numLabels[key]))
+ }
}
+ s.NumUnit = numUnits
}
- s.NumUnit = numUnits
}
- s.Location = make([]*Location, len(s.locationIDX))
+
+ s.Location = locBuffer[:len(s.locationIDX)]
+ locBuffer = locBuffer[len(s.locationIDX):]
for i, lid := range s.locationIDX {
if lid < uint64(len(locationIds)) {
s.Location[i] = locationIds[lid]
diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go
index ea8e66c68d..c794b93906 100644
--- a/vendor/github.com/google/pprof/profile/filter.go
+++ b/vendor/github.com/google/pprof/profile/filter.go
@@ -22,6 +22,10 @@ import "regexp"
// samples where at least one frame matches focus but none match ignore.
// Returns true if the corresponding regexp matched at least one sample.
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+ if focus == nil && ignore == nil && hide == nil && show == nil {
+ fm = true // Missing focus implies a match
+ return
+ }
focusOrIgnore := make(map[uint64]bool)
hidden := make(map[uint64]bool)
for _, l := range p.Location {
diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go
index 0c8f3bb5b7..8d07fd6c27 100644
--- a/vendor/github.com/google/pprof/profile/legacy_profile.go
+++ b/vendor/github.com/google/pprof/profile/legacy_profile.go
@@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) {
//
// The general format for profilez samples is a sequence of words in
// binary format. The first words are a header with the following data:
-// 1st word -- 0
-// 2nd word -- 3
-// 3rd word -- 0 if a c++ application, 1 if a java application.
-// 4th word -- Sampling period (in microseconds).
-// 5th word -- Padding.
+//
+// 1st word -- 0
+// 2nd word -- 3
+// 3rd word -- 0 if a c++ application, 1 if a java application.
+// 4th word -- Sampling period (in microseconds).
+// 5th word -- Padding.
func parseCPU(b []byte) (*Profile, error) {
var parse func([]byte) (uint64, []byte)
var n1, n2, n3, n4, n5 uint64
@@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) {
//
// profilez samples are a repeated sequence of stack frames of the
// form:
-// 1st word -- The number of times this stack was encountered.
-// 2nd word -- The size of the stack (StackSize).
-// 3rd word -- The first address on the stack.
-// ...
-// StackSize + 2 -- The last address on the stack
+//
+// 1st word -- The number of times this stack was encountered.
+// 2nd word -- The size of the stack (StackSize).
+// 3rd word -- The first address on the stack.
+// ...
+// StackSize + 2 -- The last address on the stack
+//
// The last stack trace is of the form:
-// 1st word -- 0
-// 2nd word -- 1
-// 3rd word -- 0
+//
+// 1st word -- 0
+// 2nd word -- 1
+// 3rd word -- 0
//
// Addresses from stack traces may point to the next instruction after
// each call. Optionally adjust by -1 to land somewhere on the actual
@@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) {
// Recognize each thread and populate profile samples.
for !isMemoryMapSentinel(line) {
if strings.HasPrefix(line, "---- no stack trace for") {
- line = ""
break
}
if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go
index 9978e7330e..4b66282cb8 100644
--- a/vendor/github.com/google/pprof/profile/merge.go
+++ b/vendor/github.com/google/pprof/profile/merge.go
@@ -15,6 +15,7 @@
package profile
import (
+ "encoding/binary"
"fmt"
"sort"
"strconv"
@@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) {
for _, src := range srcs {
// Clear the profile-specific hash tables
- pm.locationsByID = make(map[uint64]*Location, len(src.Location))
+ pm.locationsByID = makeLocationIDMap(len(src.Location))
pm.functionsByID = make(map[uint64]*Function, len(src.Function))
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
@@ -136,7 +137,7 @@ type profileMerger struct {
p *Profile
// Memoization tables within a profile.
- locationsByID map[uint64]*Location
+ locationsByID locationIDMap
functionsByID map[uint64]*Function
mappingsByID map[uint64]mapInfo
@@ -153,6 +154,16 @@ type mapInfo struct {
}
func (pm *profileMerger) mapSample(src *Sample) *Sample {
+ // Check memoization table
+ k := pm.sampleKey(src)
+ if ss, ok := pm.samples[k]; ok {
+ for i, v := range src.Value {
+ ss.Value[i] += v
+ }
+ return ss
+ }
+
+ // Make new sample.
s := &Sample{
Location: make([]*Location, len(src.Location)),
Value: make([]int64, len(src.Value)),
@@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample {
s.NumLabel[k] = vv
s.NumUnit[k] = uu
}
- // Check memoization table. Must be done on the remapped location to
- // account for the remapped mapping. Add current values to the
- // existing sample.
- k := s.key()
- if ss, ok := pm.samples[k]; ok {
- for i, v := range src.Value {
- ss.Value[i] += v
- }
- return ss
- }
copy(s.Value, src.Value)
pm.samples[k] = s
pm.p.Sample = append(pm.p.Sample, s)
return s
}
-// key generates sampleKey to be used as a key for maps.
-func (sample *Sample) key() sampleKey {
- ids := make([]string, len(sample.Location))
- for i, l := range sample.Location {
- ids[i] = strconv.FormatUint(l.ID, 16)
+func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
+ // Accumulate contents into a string.
+ var buf strings.Builder
+ buf.Grow(64) // Heuristic to avoid extra allocs
+
+ // encode a number
+ putNumber := func(v uint64) {
+ var num [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(num[:], v)
+ buf.Write(num[:n])
+ }
+
+ // encode a string prefixed with its length.
+ putDelimitedString := func(s string) {
+ putNumber(uint64(len(s)))
+ buf.WriteString(s)
+ }
+
+ for _, l := range sample.Location {
+ // Get the location in the merged profile, which may have a different ID.
+ if loc := pm.mapLocation(l); loc != nil {
+ putNumber(loc.ID)
+ }
}
+ putNumber(0) // Delimiter
- labels := make([]string, 0, len(sample.Label))
- for k, v := range sample.Label {
- labels = append(labels, fmt.Sprintf("%q%q", k, v))
+ for _, l := range sortedKeys1(sample.Label) {
+ putDelimitedString(l)
+ values := sample.Label[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putDelimitedString(v)
+ }
}
- sort.Strings(labels)
- numlabels := make([]string, 0, len(sample.NumLabel))
- for k, v := range sample.NumLabel {
- numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
+ for _, l := range sortedKeys2(sample.NumLabel) {
+ putDelimitedString(l)
+ values := sample.NumLabel[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putNumber(uint64(v))
+ }
+ units := sample.NumUnit[l]
+ putNumber(uint64(len(units)))
+ for _, v := range units {
+ putDelimitedString(v)
+ }
}
- sort.Strings(numlabels)
- return sampleKey{
- strings.Join(ids, "|"),
- strings.Join(labels, ""),
- strings.Join(numlabels, ""),
+ return sampleKey(buf.String())
+}
+
+type sampleKey string
+
+// sortedKeys1 returns the sorted keys found in a string->[]string map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys2 and made into a generic function.
+func sortedKeys1(m map[string][]string) []string {
+ if len(m) == 0 {
+ return nil
}
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
}
-type sampleKey struct {
- locations string
- labels string
- numlabels string
+// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys1 and made into a generic function.
+func sortedKeys2(m map[string][]int64) []string {
+ if len(m) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
}
func (pm *profileMerger) mapLocation(src *Location) *Location {
@@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
return nil
}
- if l, ok := pm.locationsByID[src.ID]; ok {
+ if l := pm.locationsByID.get(src.ID); l != nil {
return l
}
@@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location {
// account for the remapped mapping ID.
k := l.key()
if ll, ok := pm.locations[k]; ok {
- pm.locationsByID[src.ID] = ll
+ pm.locationsByID.set(src.ID, ll)
return ll
}
- pm.locationsByID[src.ID] = l
+ pm.locationsByID.set(src.ID, l)
pm.locations[k] = l
pm.p.Location = append(pm.p.Location, l)
return l
@@ -303,16 +360,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
return mi
}
m := &Mapping{
- ID: uint64(len(pm.p.Mapping) + 1),
- Start: src.Start,
- Limit: src.Limit,
- Offset: src.Offset,
- File: src.File,
- BuildID: src.BuildID,
- HasFunctions: src.HasFunctions,
- HasFilenames: src.HasFilenames,
- HasLineNumbers: src.HasLineNumbers,
- HasInlineFrames: src.HasInlineFrames,
+ ID: uint64(len(pm.p.Mapping) + 1),
+ Start: src.Start,
+ Limit: src.Limit,
+ Offset: src.Offset,
+ File: src.File,
+ KernelRelocationSymbol: src.KernelRelocationSymbol,
+ BuildID: src.BuildID,
+ HasFunctions: src.HasFunctions,
+ HasFilenames: src.HasFilenames,
+ HasLineNumbers: src.HasLineNumbers,
+ HasInlineFrames: src.HasInlineFrames,
}
pm.p.Mapping = append(pm.p.Mapping, m)
@@ -479,3 +537,131 @@ func (p *Profile) compatible(pb *Profile) error {
func equalValueType(st1, st2 *ValueType) bool {
return st1.Type == st2.Type && st1.Unit == st2.Unit
}
+
+// locationIDMap is like a map[uint64]*Location, but provides efficiency for
+// ids that are densely numbered, which is often the case.
+type locationIDMap struct {
+ dense []*Location // indexed by id for id < len(dense)
+ sparse map[uint64]*Location // indexed by id for id >= len(dense)
+}
+
+func makeLocationIDMap(n int) locationIDMap {
+ return locationIDMap{
+ dense: make([]*Location, n),
+ sparse: map[uint64]*Location{},
+ }
+}
+
+func (lm locationIDMap) get(id uint64) *Location {
+ if id < uint64(len(lm.dense)) {
+ return lm.dense[int(id)]
+ }
+ return lm.sparse[id]
+}
+
+func (lm locationIDMap) set(id uint64, loc *Location) {
+ if id < uint64(len(lm.dense)) {
+ lm.dense[id] = loc
+ return
+ }
+ lm.sparse[id] = loc
+}
+
+// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
+// keeps sample types that appear in all profiles only and drops/reorders the
+// sample types as necessary.
+//
+// In the case of sample types order is not the same for given profiles the
+// order is derived from the first profile.
+//
+// Profiles are modified in-place.
+//
+// It returns an error if the sample type's intersection is empty.
+func CompatibilizeSampleTypes(ps []*Profile) error {
+ sTypes := commonSampleTypes(ps)
+ if len(sTypes) == 0 {
+ return fmt.Errorf("profiles have empty common sample type list")
+ }
+ for _, p := range ps {
+ if err := compatibilizeSampleTypes(p, sTypes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// commonSampleTypes returns sample types that appear in all profiles in the
+// order how they ordered in the first profile.
+// order in which they appear in the first profile.
+ if len(ps) == 0 {
+ return nil
+ }
+ sTypes := map[string]int{}
+ for _, p := range ps {
+ for _, st := range p.SampleType {
+ sTypes[st.Type]++
+ }
+ }
+ var res []string
+ for _, st := range ps[0].SampleType {
+ if sTypes[st.Type] == len(ps) {
+ res = append(res, st.Type)
+ }
+ }
+ return res
+}
+
+// compatibilizeSampleTypes drops sample types that are not present in sTypes
+// list and reorders them if needed.
+//
+// It sets DefaultSampleType to sType[0] if it is not in sType list.
+//
+// It assumes that all sample types from the sTypes list are present in the
+// given profile otherwise it returns an error.
+func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
+ if len(sTypes) == 0 {
+ return fmt.Errorf("sample type list is empty")
+ }
+ defaultSampleType := sTypes[0]
+ reMap, needToModify := make([]int, len(sTypes)), false
+ for i, st := range sTypes {
+ if st == p.DefaultSampleType {
+ defaultSampleType = p.DefaultSampleType
+ }
+ idx := searchValueType(p.SampleType, st)
+ if idx < 0 {
+ return fmt.Errorf("%q sample type is not found in profile", st)
+ }
+ reMap[i] = idx
+ if idx != i {
+ needToModify = true
+ }
+ }
+ if !needToModify && len(sTypes) == len(p.SampleType) {
+ return nil
+ }
+ p.DefaultSampleType = defaultSampleType
+ oldSampleTypes := p.SampleType
+ p.SampleType = make([]*ValueType, len(sTypes))
+ for i, idx := range reMap {
+ p.SampleType[i] = oldSampleTypes[idx]
+ }
+ values := make([]int64, len(sTypes))
+ for _, s := range p.Sample {
+ for i, idx := range reMap {
+ values[i] = s.Value[idx]
+ }
+ s.Value = s.Value[:len(values)]
+ copy(s.Value, values)
+ }
+ return nil
+}
+
+func searchValueType(vts []*ValueType, s string) int {
+ for i, vt := range vts {
+ if vt.Type == s {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
index 2590c8ddb4..60ef7e9268 100644
--- a/vendor/github.com/google/pprof/profile/profile.go
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -21,7 +21,6 @@ import (
"compress/gzip"
"fmt"
"io"
- "io/ioutil"
"math"
"path/filepath"
"regexp"
@@ -73,9 +72,23 @@ type ValueType struct {
type Sample struct {
Location []*Location
Value []int64
- Label map[string][]string
+ // Label is a per-label-key map to values for string labels.
+ //
+ // In general, having multiple values for the given label key is strongly
+ // discouraged - see docs for the sample label field in profile.proto. The
+ // main reason this unlikely state is tracked here is to make the
+ // decoding->encoding roundtrip not lossy. But we expect that the value
+ // slices present in this map are always of length 1.
+ Label map[string][]string
+ // NumLabel is a per-label-key map to values for numeric labels. See a note
+ // above on handling multiple values for a label.
NumLabel map[string][]int64
- NumUnit map[string][]string
+ // NumUnit is a per-label-key map to the unit names of corresponding numeric
+ // label values. The unit info may be missing even if the label is in
+ // NumLabel, see the docs in profile.proto for details. When the value is
+ // NumLabel, see the docs in profile.proto for details. When the value
+ // the corresponding value slice in NumLabel.
+ NumUnit map[string][]string
locationIDX []uint64
labelX []label
@@ -106,6 +119,15 @@ type Mapping struct {
fileX int64
buildIDX int64
+
+ // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
+ // For linux kernel mappings generated by some tools, correct symbolization depends
+ // on knowing which of the two possible relocation symbols was used for `Start`.
+ // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
+ //
+ // Note, this public field is not persisted in the proto. For the purposes of
+ // copying / merging / hashing profiles, it is considered subsumed by `File`.
+ KernelRelocationSymbol string
}
// Location corresponds to Profile.Location
@@ -144,7 +166,7 @@ type Function struct {
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
func Parse(r io.Reader) (*Profile, error) {
- data, err := ioutil.ReadAll(r)
+ data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@@ -159,7 +181,7 @@ func ParseData(data []byte) (*Profile, error) {
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err == nil {
- data, err = ioutil.ReadAll(gz)
+ data, err = io.ReadAll(gz)
}
if err != nil {
return nil, fmt.Errorf("decompressing profile: %v", err)
@@ -707,6 +729,35 @@ func (s *Sample) HasLabel(key, value string) bool {
return false
}
+// SetNumLabel sets the specified key to the specified value for all samples in the
+// profile. "unit" is a slice that describes the units that each corresponding member
+// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
+// unit for a given value, that member of "unit" should be the empty string.
+// "unit" must either have the same length as "value", or be nil.
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
+ for _, sample := range p.Sample {
+ if sample.NumLabel == nil {
+ sample.NumLabel = map[string][]int64{key: value}
+ } else {
+ sample.NumLabel[key] = value
+ }
+ if sample.NumUnit == nil {
+ sample.NumUnit = map[string][]string{key: unit}
+ } else {
+ sample.NumUnit[key] = unit
+ }
+ }
+}
+
+// RemoveNumLabel removes all numerical labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveNumLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.NumLabel, key)
+ delete(sample.NumUnit, key)
+ }
+}
+
// DiffBaseSample returns true if a sample belongs to the diff base and false
// otherwise.
func (s *Sample) DiffBaseSample() bool {
diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go
index 539ad3ab33..a15696ba16 100644
--- a/vendor/github.com/google/pprof/profile/proto.go
+++ b/vendor/github.com/google/pprof/profile/proto.go
@@ -39,11 +39,12 @@ import (
)
type buffer struct {
- field int // field tag
- typ int // proto wire type code for field
- u64 uint64
- data []byte
- tmp [16]byte
+ field int // field tag
+ typ int // proto wire type code for field
+ u64 uint64
+ data []byte
+ tmp [16]byte
+ tmpLines []Line // temporary storage used while decoding "repeated Line".
}
type decoder func(*buffer, message) error
@@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error {
if b.typ == 2 {
// Packed encoding
data := b.data
- tmp := make([]int64, 0, len(data)) // Maximally sized
for len(data) > 0 {
var u uint64
var err error
@@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error {
if u, data, err = decodeVarint(data); err != nil {
return err
}
- tmp = append(tmp, int64(u))
+ *x = append(*x, int64(u))
}
- *x = append(*x, tmp...)
return nil
}
var i int64
@@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
if b.typ == 2 {
data := b.data
// Packed encoding
- tmp := make([]uint64, 0, len(data)) // Maximally sized
for len(data) > 0 {
var u uint64
var err error
@@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error {
if u, data, err = decodeVarint(data); err != nil {
return err
}
- tmp = append(tmp, u)
+ *x = append(*x, u)
}
- *x = append(*x, tmp...)
return nil
}
var u uint64
diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go
index 02d21a8184..b2f9fd5466 100644
--- a/vendor/github.com/google/pprof/profile/prune.go
+++ b/vendor/github.com/google/pprof/profile/prune.go
@@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
prune := make(map[uint64]bool)
pruneBeneath := make(map[uint64]bool)
+ // simplifyFunc can be expensive, so cache results.
+ // Note that the same function name can be encountered many times due to
+ // different lines and addresses in the same function.
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune
+ pruneFromHere := func(s string) bool {
+ if r, ok := pruneCache[s]; ok {
+ return r
+ }
+ funcName := simplifyFunc(s)
+ if dropRx.MatchString(funcName) {
+ if keepRx == nil || !keepRx.MatchString(funcName) {
+ pruneCache[s] = true
+ return true
+ }
+ }
+ pruneCache[s] = false
+ return false
+ }
+
for _, loc := range p.Location {
var i int
for i = len(loc.Line) - 1; i >= 0; i-- {
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
- funcName := simplifyFunc(fn.Name)
- if dropRx.MatchString(funcName) {
- if keepRx == nil || !keepRx.MatchString(funcName) {
- break
- }
+ if pruneFromHere(fn.Name) {
+ break
}
}
}
diff --git a/vendor/github.com/jongio/azidext/go/azidext/azure_identity_credential_adapter.go b/vendor/github.com/jongio/azidext/go/azidext/azure_identity_credential_adapter.go
deleted file mode 100644
index 553b95a271..0000000000
--- a/vendor/github.com/jongio/azidext/go/azidext/azure_identity_credential_adapter.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidext
-
-import (
- "errors"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
- "github.com/Azure/go-autorest/autorest"
-)
-
-// NewTokenCredentialAdapter is used to adapt an azcore.TokenCredential to an autorest.Authorizer
-func NewTokenCredentialAdapter(credential azcore.TokenCredential, scopes []string) autorest.Authorizer {
- tkPolicy := runtime.NewBearerTokenPolicy(credential, scopes, nil)
- return &policyAdapter{
- pl: runtime.NewPipeline("azidext", "v0.4.0", runtime.PipelineOptions{
- PerRetry: []policy.Policy{tkPolicy, nullPolicy{}},
- }, nil),
- }
-}
-
-type policyAdapter struct {
- pl runtime.Pipeline
-}
-
-// WithAuthorization implements the autorest.Authorizer interface for type policyAdapter.
-func (ca *policyAdapter) WithAuthorization() autorest.PrepareDecorator {
- return func(p autorest.Preparer) autorest.Preparer {
- return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
- r, err := p.Prepare(r)
- if err != nil {
- return r, err
- }
- // create a dummy request
- req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String())
- if err != nil {
- return r, err
- }
- _, err = ca.pl.Do(req)
- // if the authentication failed due to invalid/missing credentials
- // return a wrapped error so the retry policy won't kick in.
- type nonRetriable interface {
- NonRetriable()
- }
- var nre nonRetriable
- if errors.As(err, &nre) {
- return r, &tokenRefreshError{
- inner: err,
- }
- }
- // some other error
- if err != nil {
- return r, err
- }
- // copy the authorization header to the real request
- const authHeader = "Authorization"
- r.Header.Set(authHeader, req.Raw().Header.Get(authHeader))
- return r, err
- })
- }
-}
-
-// DefaultManagementScope is the default credential scope for Azure Resource Management.
-const DefaultManagementScope = "https://management.azure.com//.default"
-
-// DefaultAzureCredentialOptions contains credential and authentication policy options.
-type DefaultAzureCredentialOptions struct {
- // DefaultCredential contains configuration options passed to azidentity.NewDefaultAzureCredential().
- // Set this to nil to accept the underlying default behavior.
- DefaultCredential *azidentity.DefaultAzureCredentialOptions
-
- // Scopes contains the list of permission scopes required for the token.
- // Setting this to nil will use the DefaultManagementScope when acquiring a token.
- Scopes []string
-}
-
-// NewDefaultAzureCredentialAdapter adapts azcore.NewDefaultAzureCredential to an autorest.Authorizer.
-func NewDefaultAzureCredentialAdapter(options *DefaultAzureCredentialOptions) (autorest.Authorizer, error) {
- if options == nil {
- options = &DefaultAzureCredentialOptions{
- Scopes: []string{DefaultManagementScope},
- }
- }
- cred, err := azidentity.NewDefaultAzureCredential(options.DefaultCredential)
- if err != nil {
- return nil, err
- }
- return NewTokenCredentialAdapter(cred, options.Scopes), nil
-}
-
-// dummy policy to terminate the pipeline
-type nullPolicy struct{}
-
-func (nullPolicy) Do(req *policy.Request) (*http.Response, error) {
- return &http.Response{StatusCode: http.StatusOK}, nil
-}
-
-// error type returned to prevent the retry policy from retrying the request
-type tokenRefreshError struct {
- inner error
-}
-
-func (t *tokenRefreshError) Error() string {
- return t.inner.Error()
-}
-
-func (t *tokenRefreshError) Response() *http.Response {
- return nil
-}
-
-func (t *tokenRefreshError) Unwrap() error {
- return t.inner
-}
diff --git a/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go b/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go
index af64a7b27d..cad31f17f2 100644
--- a/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go
+++ b/vendor/github.com/kubernetes-csi/csi-lib-utils/protosanitizer/protosanitizer.go
@@ -26,7 +26,6 @@ import (
"github.com/golang/protobuf/descriptor"
"github.com/golang/protobuf/proto"
- protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
protobufdescriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)
@@ -56,7 +55,7 @@ func StripSecretsCSI03(msg interface{}) fmt.Stringer {
type stripSecrets struct {
msg interface{}
- isSecretField func(field *protobuf.FieldDescriptorProto) bool
+ isSecretField func(field *protobufdescriptor.FieldDescriptorProto) bool
}
func (s *stripSecrets) String() string {
@@ -110,7 +109,7 @@ func (s *stripSecrets) strip(parsed interface{}, msg interface{}) {
if _, ok := parsedFields[field.GetName()]; ok {
parsedFields[field.GetName()] = "***stripped***"
}
- } else if field.GetType() == protobuf.FieldDescriptorProto_TYPE_MESSAGE {
+ } else if field.GetType() == protobufdescriptor.FieldDescriptorProto_TYPE_MESSAGE {
// When we get here,
// the type name is something like ".csi.v1.CapacityRange" (leading dot!)
// and looking up "csi.v1.CapacityRange"
@@ -150,7 +149,7 @@ func (s *stripSecrets) strip(parsed interface{}, msg interface{}) {
// isCSI1Secret uses the csi.E_CsiSecret extension from CSI 1.0 to
// determine whether a field contains secrets.
-func isCSI1Secret(field *protobuf.FieldDescriptorProto) bool {
+func isCSI1Secret(field *protobufdescriptor.FieldDescriptorProto) bool {
ex, err := proto.GetExtension(field.Options, e_CsiSecret)
return err == nil && ex != nil && *ex.(*bool)
}
@@ -172,6 +171,6 @@ var e_CsiSecret = &proto.ExtensionDesc{
// isCSI03Secret relies on the naming convention in CSI <= 0.3
// to determine whether a field contains secrets.
-func isCSI03Secret(field *protobuf.FieldDescriptorProto) bool {
+func isCSI03Secret(field *protobufdescriptor.FieldDescriptorProto) bool {
return strings.HasSuffix(field.GetName(), "_secrets")
}
diff --git a/vendor/github.com/kubernetes-csi/csi-proxy/client/apiversion/version.go b/vendor/github.com/kubernetes-csi/csi-proxy/client/apiversion/version.go
index 1405298251..e4f0cd0426 100644
--- a/vendor/github.com/kubernetes-csi/csi-proxy/client/apiversion/version.go
+++ b/vendor/github.com/kubernetes-csi/csi-proxy/client/apiversion/version.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/disk/v1/client_generated.go b/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/disk/v1/client_generated.go
index 524814e958..9c932a77f1 100644
--- a/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/disk/v1/client_generated.go
+++ b/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/disk/v1/client_generated.go
@@ -8,7 +8,7 @@ import (
"github.com/Microsoft/go-winio"
"github.com/kubernetes-csi/csi-proxy/client"
- v1 "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1"
+ "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1"
"github.com/kubernetes-csi/csi-proxy/client/apiversion"
"google.golang.org/grpc"
)
diff --git a/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/filesystem/v1/client_generated.go b/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/filesystem/v1/client_generated.go
index b3ffc7fa73..cf136c9d71 100644
--- a/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/filesystem/v1/client_generated.go
+++ b/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/filesystem/v1/client_generated.go
@@ -8,7 +8,7 @@ import (
"github.com/Microsoft/go-winio"
"github.com/kubernetes-csi/csi-proxy/client"
- v1 "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v1"
+ "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v1"
"github.com/kubernetes-csi/csi-proxy/client/apiversion"
"google.golang.org/grpc"
)
diff --git a/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/volume/v1/client_generated.go b/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/volume/v1/client_generated.go
index 659ed34b83..8c9a3cd773 100644
--- a/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/volume/v1/client_generated.go
+++ b/vendor/github.com/kubernetes-csi/csi-proxy/client/groups/volume/v1/client_generated.go
@@ -8,7 +8,7 @@ import (
"github.com/Microsoft/go-winio"
"github.com/kubernetes-csi/csi-proxy/client"
- v1 "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1"
+ "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1"
"github.com/kubernetes-csi/csi-proxy/client/apiversion"
"google.golang.org/grpc"
)
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
deleted file mode 100644
index 1955f2878c..0000000000
--- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
+++ /dev/null
@@ -1,73 +0,0 @@
-## unreleased
-
-* Fix regression where `*time.Time` value would be set to empty and not be sent
- to decode hooks properly [GH-232]
-
-## 1.4.0
-
-* A new decode hook type `DecodeHookFuncValue` has been added that has
- access to the full values. [GH-183]
-* Squash is now supported with embedded fields that are struct pointers [GH-205]
-* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
-
-## 1.3.3
-
-* Decoding maps from maps creates a settable value for decode hooks [GH-203]
-
-## 1.3.2
-
-* Decode into interface type with a struct value is supported [GH-187]
-
-## 1.3.1
-
-* Squash should only squash embedded structs. [GH-194]
-
-## 1.3.0
-
-* Added `",omitempty"` support. This will ignore zero values in the source
- structure when encoding. [GH-145]
-
-## 1.2.3
-
-* Fix duplicate entries in Keys list with pointer values. [GH-185]
-
-## 1.2.2
-
-* Do not add unsettable (unexported) values to the unused metadata key
- or "remain" value. [GH-150]
-
-## 1.2.1
-
-* Go modules checksum mismatch fix
-
-## 1.2.0
-
-* Added support to capture unused values in a field using the `",remain"` value
- in the mapstructure tag. There is an example to showcase usage.
-* Added `DecoderConfig` option to always squash embedded structs
-* `json.Number` can decode into `uint` types
-* Empty slices are preserved and not replaced with nil slices
-* Fix panic that can occur in when decoding a map into a nil slice of structs
-* Improved package documentation for godoc
-
-## 1.1.2
-
-* Fix error when decode hook decodes interface implementation into interface
- type. [GH-140]
-
-## 1.1.1
-
-* Fix panic that can happen in `decodePtr`
-
-## 1.1.0
-
-* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
-* Support struct to struct decoding [GH-137]
-* If source map value is nil, then destination map value is nil (instead of empty)
-* If source slice value is nil, then destination slice value is nil (instead of empty)
-* If source pointer is nil, then destination pointer is set to nil (instead of
- allocated zero value of type)
-
-## 1.0.0
-
-* Initial tagged stable release.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
deleted file mode 100644
index 0018dc7d9f..0000000000
--- a/vendor/github.com/mitchellh/mapstructure/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
-
-mapstructure is a Go library for decoding generic map values to structures
-and vice versa, while providing helpful error handling.
-
-This library is most useful when decoding values from some data stream (JSON,
-Gob, etc.) where you don't _quite_ know the structure of the underlying data
-until you read a part of it. You can therefore read a `map[string]interface{}`
-and use this library to decode it into the proper underlying native Go
-structure.
-
-## Installation
-
-Standard `go get`:
-
-```
-$ go get github.com/mitchellh/mapstructure
-```
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
-
-The `Decode` function has examples associated with it there.
-
-## But Why?!
-
-Go offers fantastic standard libraries for decoding formats such as JSON.
-The standard method is to have a struct pre-created, and populate that struct
-from the bytes of the encoded format. This is great, but the problem is if
-you have configuration or an encoding that changes slightly depending on
-specific fields. For example, consider this JSON:
-
-```json
-{
- "type": "person",
- "name": "Mitchell"
-}
-```
-
-Perhaps we can't populate a specific structure without first reading
-the "type" field from the JSON. We could always do two passes over the
-decoding of the JSON (reading the "type" first, and the rest later).
-However, it is much simpler to just decode this into a `map[string]interface{}`
-structure, read the "type" key, then use something like this library
-to decode it into the proper structure.
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
deleted file mode 100644
index 92e6f76fff..0000000000
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package mapstructure
-
-import (
- "encoding"
- "errors"
- "fmt"
- "net"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
-// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
-func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
- // Create variables here so we can reference them with the reflect pkg
- var f1 DecodeHookFuncType
- var f2 DecodeHookFuncKind
- var f3 DecodeHookFuncValue
-
- // Fill in the variables into this interface and the rest is done
- // automatically using the reflect package.
- potential := []interface{}{f1, f2, f3}
-
- v := reflect.ValueOf(h)
- vt := v.Type()
- for _, raw := range potential {
- pt := reflect.ValueOf(raw).Type()
- if vt.ConvertibleTo(pt) {
- return v.Convert(pt).Interface()
- }
- }
-
- return nil
-}
-
-// DecodeHookExec executes the given decode hook. This should be used
-// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
-// that took reflect.Kind instead of reflect.Type.
-func DecodeHookExec(
- raw DecodeHookFunc,
- from reflect.Value, to reflect.Value) (interface{}, error) {
-
- switch f := typedDecodeHook(raw).(type) {
- case DecodeHookFuncType:
- return f(from.Type(), to.Type(), from.Interface())
- case DecodeHookFuncKind:
- return f(from.Kind(), to.Kind(), from.Interface())
- case DecodeHookFuncValue:
- return f(from, to)
- default:
- return nil, errors.New("invalid decode hook signature")
- }
-}
-
-// ComposeDecodeHookFunc creates a single DecodeHookFunc that
-// automatically composes multiple DecodeHookFuncs.
-//
-// The composed funcs are called in order, with the result of the
-// previous transformation.
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
- return func(f reflect.Value, t reflect.Value) (interface{}, error) {
- var err error
- var data interface{}
- newFrom := f
- for _, f1 := range fs {
- data, err = DecodeHookExec(f1, newFrom, t)
- if err != nil {
- return nil, err
- }
- newFrom = reflect.ValueOf(data)
- }
-
- return data, nil
- }
-}
-
-// StringToSliceHookFunc returns a DecodeHookFunc that converts
-// string to []string by splitting on the given sep.
-func StringToSliceHookFunc(sep string) DecodeHookFunc {
- return func(
- f reflect.Kind,
- t reflect.Kind,
- data interface{}) (interface{}, error) {
- if f != reflect.String || t != reflect.Slice {
- return data, nil
- }
-
- raw := data.(string)
- if raw == "" {
- return []string{}, nil
- }
-
- return strings.Split(raw, sep), nil
- }
-}
-
-// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
-// strings to time.Duration.
-func StringToTimeDurationHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(time.Duration(5)) {
- return data, nil
- }
-
- // Convert it by parsing
- return time.ParseDuration(data.(string))
- }
-}
-
-// StringToIPHookFunc returns a DecodeHookFunc that converts
-// strings to net.IP
-func StringToIPHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(net.IP{}) {
- return data, nil
- }
-
- // Convert it by parsing
- ip := net.ParseIP(data.(string))
- if ip == nil {
- return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
- }
-
- return ip, nil
- }
-}
-
-// StringToIPNetHookFunc returns a DecodeHookFunc that converts
-// strings to net.IPNet
-func StringToIPNetHookFunc() DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(net.IPNet{}) {
- return data, nil
- }
-
- // Convert it by parsing
- _, net, err := net.ParseCIDR(data.(string))
- return net, err
- }
-}
-
-// StringToTimeHookFunc returns a DecodeHookFunc that converts
-// strings to time.Time.
-func StringToTimeHookFunc(layout string) DecodeHookFunc {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- if t != reflect.TypeOf(time.Time{}) {
- return data, nil
- }
-
- // Convert it by parsing
- return time.Parse(layout, data.(string))
- }
-}
-
-// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
-// the decoder.
-//
-// Note that this is significantly different from the WeaklyTypedInput option
-// of the DecoderConfig.
-func WeaklyTypedHook(
- f reflect.Kind,
- t reflect.Kind,
- data interface{}) (interface{}, error) {
- dataVal := reflect.ValueOf(data)
- switch t {
- case reflect.String:
- switch f {
- case reflect.Bool:
- if dataVal.Bool() {
- return "1", nil
- }
- return "0", nil
- case reflect.Float32:
- return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
- case reflect.Int:
- return strconv.FormatInt(dataVal.Int(), 10), nil
- case reflect.Slice:
- dataType := dataVal.Type()
- elemKind := dataType.Elem().Kind()
- if elemKind == reflect.Uint8 {
- return string(dataVal.Interface().([]uint8)), nil
- }
- case reflect.Uint:
- return strconv.FormatUint(dataVal.Uint(), 10), nil
- }
- }
-
- return data, nil
-}
-
-func RecursiveStructToMapHookFunc() DecodeHookFunc {
- return func(f reflect.Value, t reflect.Value) (interface{}, error) {
- if f.Kind() != reflect.Struct {
- return f.Interface(), nil
- }
-
- var i interface{} = struct{}{}
- if t.Type() != reflect.TypeOf(&i).Elem() {
- return f.Interface(), nil
- }
-
- m := make(map[string]interface{})
- t.Set(reflect.ValueOf(m))
-
- return f.Interface(), nil
- }
-}
-
-// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
-// strings to the UnmarshalText function, when the target type
-// implements the encoding.TextUnmarshaler interface
-func TextUnmarshallerHookFunc() DecodeHookFuncType {
- return func(
- f reflect.Type,
- t reflect.Type,
- data interface{}) (interface{}, error) {
- if f.Kind() != reflect.String {
- return data, nil
- }
- result := reflect.New(t).Interface()
- unmarshaller, ok := result.(encoding.TextUnmarshaler)
- if !ok {
- return data, nil
- }
- if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
- return nil, err
- }
- return result, nil
- }
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
deleted file mode 100644
index 47a99e5af3..0000000000
--- a/vendor/github.com/mitchellh/mapstructure/error.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package mapstructure
-
-import (
- "errors"
- "fmt"
- "sort"
- "strings"
-)
-
-// Error implements the error interface and can represents multiple
-// errors that occur in the course of a single decode.
-type Error struct {
- Errors []string
-}
-
-func (e *Error) Error() string {
- points := make([]string, len(e.Errors))
- for i, err := range e.Errors {
- points[i] = fmt.Sprintf("* %s", err)
- }
-
- sort.Strings(points)
- return fmt.Sprintf(
- "%d error(s) decoding:\n\n%s",
- len(e.Errors), strings.Join(points, "\n"))
-}
-
-// WrappedErrors implements the errwrap.Wrapper interface to make this
-// return value more useful with the errwrap and go-multierror libraries.
-func (e *Error) WrappedErrors() []error {
- if e == nil {
- return nil
- }
-
- result := make([]error, len(e.Errors))
- for i, e := range e.Errors {
- result[i] = errors.New(e)
- }
-
- return result
-}
-
-func appendErrors(errors []string, err error) []string {
- switch e := err.(type) {
- case *Error:
- return append(errors, e.Errors...)
- default:
- return append(errors, e.Error())
- }
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
deleted file mode 100644
index 3643901f55..0000000000
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ /dev/null
@@ -1,1462 +0,0 @@
-// Package mapstructure exposes functionality to convert one arbitrary
-// Go type into another, typically to convert a map[string]interface{}
-// into a native Go structure.
-//
-// The Go structure can be arbitrarily complex, containing slices,
-// other structs, etc. and the decoder will properly decode nested
-// maps and so on into the proper structures in the native Go struct.
-// See the examples to see what the decoder is capable of.
-//
-// The simplest function to start with is Decode.
-//
-// Field Tags
-//
-// When decoding to a struct, mapstructure will use the field name by
-// default to perform the mapping. For example, if a struct has a field
-// "Username" then mapstructure will look for a key in the source value
-// of "username" (case insensitive).
-//
-// type User struct {
-// Username string
-// }
-//
-// You can change the behavior of mapstructure by using struct tags.
-// The default struct tag that mapstructure looks for is "mapstructure"
-// but you can customize it using DecoderConfig.
-//
-// Renaming Fields
-//
-// To rename the key that mapstructure looks for, use the "mapstructure"
-// tag and set a value directly. For example, to change the "username" example
-// above to "user":
-//
-// type User struct {
-// Username string `mapstructure:"user"`
-// }
-//
-// Embedded Structs and Squashing
-//
-// Embedded structs are treated as if they're another field with that name.
-// By default, the two structs below are equivalent when decoding with
-// mapstructure:
-//
-// type Person struct {
-// Name string
-// }
-//
-// type Friend struct {
-// Person
-// }
-//
-// type Friend struct {
-// Person Person
-// }
-//
-// This would require an input that looks like below:
-//
-// map[string]interface{}{
-// "person": map[string]interface{}{"name": "alice"},
-// }
-//
-// If your "person" value is NOT nested, then you can append ",squash" to
-// your tag value and mapstructure will treat it as if the embedded struct
-// were part of the struct directly. Example:
-//
-// type Friend struct {
-// Person `mapstructure:",squash"`
-// }
-//
-// Now the following input would be accepted:
-//
-// map[string]interface{}{
-// "name": "alice",
-// }
-//
-// When decoding from a struct to a map, the squash tag squashes the struct
-// fields into a single map. Using the example structs from above:
-//
-// Friend{Person: Person{Name: "alice"}}
-//
-// Will be decoded into a map:
-//
-// map[string]interface{}{
-// "name": "alice",
-// }
-//
-// DecoderConfig has a field that changes the behavior of mapstructure
-// to always squash embedded structs.
-//
-// Remainder Values
-//
-// If there are any unmapped keys in the source value, mapstructure by
-// default will silently ignore them. You can error by setting ErrorUnused
-// in DecoderConfig. If you're using Metadata you can also maintain a slice
-// of the unused keys.
-//
-// You can also use the ",remain" suffix on your tag to collect all unused
-// values in a map. The field with this tag MUST be a map type and should
-// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
-// See example below:
-//
-// type Friend struct {
-// Name string
-// Other map[string]interface{} `mapstructure:",remain"`
-// }
-//
-// Given the input below, Other would be populated with the other
-// values that weren't used (everything but "name"):
-//
-// map[string]interface{}{
-// "name": "bob",
-// "address": "123 Maple St.",
-// }
-//
-// Omit Empty Values
-//
-// When decoding from a struct to any other value, you may use the
-// ",omitempty" suffix on your tag to omit that value if it equates to
-// the zero value. The zero value of all types is specified in the Go
-// specification.
-//
-// For example, the zero type of a numeric type is zero ("0"). If the struct
-// field value is zero and a numeric type, the field is empty, and it won't
-// be encoded into the destination type.
-//
-// type Source {
-// Age int `mapstructure:",omitempty"`
-// }
-//
-// Unexported fields
-//
-// Since unexported (private) struct fields cannot be set outside the package
-// where they are defined, the decoder will simply skip them.
-//
-// For this output type definition:
-//
-// type Exported struct {
-// private string // this unexported field will be skipped
-// Public string
-// }
-//
-// Using this map as input:
-//
-// map[string]interface{}{
-// "private": "I will be ignored",
-// "Public": "I made it through!",
-// }
-//
-// The following struct will be decoded:
-//
-// type Exported struct {
-// private: "" // field is left with an empty string (zero value)
-// Public: "I made it through!"
-// }
-//
-// Other Configuration
-//
-// mapstructure is highly configurable. See the DecoderConfig struct
-// for other features and options that are supported.
-package mapstructure
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "reflect"
- "sort"
- "strconv"
- "strings"
-)
-
-// DecodeHookFunc is the callback function that can be used for
-// data transformations. See "DecodeHook" in the DecoderConfig
-// struct.
-//
-// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
-// DecodeHookFuncValue.
-// Values are a superset of Types (Values can return types), and Types are a
-// superset of Kinds (Types can return Kinds) and are generally a richer thing
-// to use, but Kinds are simpler if you only need those.
-//
-// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
-// we started with Kinds and then realized Types were the better solution,
-// but have a promise to not break backwards compat so we now support
-// both.
-type DecodeHookFunc interface{}
-
-// DecodeHookFuncType is a DecodeHookFunc which has complete information about
-// the source and target types.
-type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
-
-// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
-// source and target types.
-type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
-
-// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target
-// values.
-type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
-
-// DecoderConfig is the configuration that is used to create a new decoder
-// and allows customization of various aspects of decoding.
-type DecoderConfig struct {
- // DecodeHook, if set, will be called before any decoding and any
- // type conversion (if WeaklyTypedInput is on). This lets you modify
- // the values before they're set down onto the resulting struct. The
- // DecodeHook is called for every map and value in the input. This means
- // that if a struct has embedded fields with squash tags the decode hook
- // is called only once with all of the input data, not once for each
- // embedded struct.
- //
- // If an error is returned, the entire decode will fail with that error.
- DecodeHook DecodeHookFunc
-
- // If ErrorUnused is true, then it is an error for there to exist
- // keys in the original map that were unused in the decoding process
- // (extra keys).
- ErrorUnused bool
-
- // ZeroFields, if set to true, will zero fields before writing them.
- // For example, a map will be emptied before decoded values are put in
- // it. If this is false, a map will be merged.
- ZeroFields bool
-
- // If WeaklyTypedInput is true, the decoder will make the following
- // "weak" conversions:
- //
- // - bools to string (true = "1", false = "0")
- // - numbers to string (base 10)
- // - bools to int/uint (true = 1, false = 0)
- // - strings to int/uint (base implied by prefix)
- // - int to bool (true if value != 0)
- // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
- // FALSE, false, False. Anything else is an error)
- // - empty array = empty map and vice versa
- // - negative numbers to overflowed uint values (base 10)
- // - slice of maps to a merged map
- // - single values are converted to slices if required. Each
- // element is weakly decoded. For example: "4" can become []int{4}
- // if the target type is an int slice.
- //
- WeaklyTypedInput bool
-
- // Squash will squash embedded structs. A squash tag may also be
- // added to an individual struct field using a tag. For example:
- //
- // type Parent struct {
- // Child `mapstructure:",squash"`
- // }
- Squash bool
-
- // Metadata is the struct that will contain extra metadata about
- // the decoding. If this is nil, then no metadata will be tracked.
- Metadata *Metadata
-
- // Result is a pointer to the struct that will contain the decoded
- // value.
- Result interface{}
-
- // The tag name that mapstructure reads for field names. This
- // defaults to "mapstructure"
- TagName string
-}
-
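The Squash field and the ",squash" tag described above are easiest to see with a tiny example. This is a hedged sketch under the same import-path assumption: the embedded Child's fields are read from the top level of the input map rather than from a nested "Child" key.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Child struct {
	Name string `mapstructure:"name"`
}

type Parent struct {
	Child `mapstructure:",squash"`
	Age   int `mapstructure:"age"`
}

func main() {
	var p Parent
	// Because of ",squash", "name" is looked up at the top level of the map.
	input := map[string]interface{}{"name": "gopher", "age": 3}
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Child:{Name:gopher} Age:3}
}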
-// A Decoder takes a raw interface value and turns it into structured
-// data, keeping track of rich error information along the way in case
-// anything goes wrong. Unlike the basic top-level Decode method, you can
-// more finely control how the Decoder behaves using the DecoderConfig
-// structure. The top-level Decode method is just a convenience that sets
-// up the most basic Decoder.
-type Decoder struct {
- config *DecoderConfig
-}
-
-// Metadata contains information about decoding a structure that
-// is tedious or difficult to get otherwise.
-type Metadata struct {
- // Keys are the keys of the structure which were successfully decoded
- Keys []string
-
- // Unused is a slice of keys that were found in the raw value but
- // weren't decoded since there was no matching field in the result interface
- Unused []string
-}
-
-// Decode takes an input structure and uses reflection to translate it to
-// the output structure. output must be a pointer to a map or struct.
-func Decode(input interface{}, output interface{}) error {
- config := &DecoderConfig{
- Metadata: nil,
- Result: output,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// WeakDecode is the same as Decode but is shorthand to enable
-// WeaklyTypedInput. See DecoderConfig for more info.
-func WeakDecode(input, output interface{}) error {
- config := &DecoderConfig{
- Metadata: nil,
- Result: output,
- WeaklyTypedInput: true,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
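A short sketch of a few of the weak conversions listed in the WeaklyTypedInput comment, under the same import-path assumption; WeakDecode is the convenience wrapper defined just above.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Port    int      `mapstructure:"port"`
	Debug   bool     `mapstructure:"debug"`
	Servers []string `mapstructure:"servers"`
}

func main() {
	input := map[string]interface{}{
		"port":    "8080",      // string -> int
		"debug":   1,           // int -> bool
		"servers": "localhost", // single value -> one-element slice
	}

	var cfg Config
	if err := mapstructure.WeakDecode(input, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Port:8080 Debug:true Servers:[localhost]}
}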
-// DecodeMetadata is the same as Decode, but is shorthand to
-// enable metadata collection. See DecoderConfig for more info.
-func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
- config := &DecoderConfig{
- Metadata: metadata,
- Result: output,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
-// WeakDecodeMetadata is the same as Decode, but is shorthand to
-// enable both WeaklyTypedInput and metadata collection. See
-// DecoderConfig for more info.
-func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
- config := &DecoderConfig{
- Metadata: metadata,
- Result: output,
- WeaklyTypedInput: true,
- }
-
- decoder, err := NewDecoder(config)
- if err != nil {
- return err
- }
-
- return decoder.Decode(input)
-}
-
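A sketch of metadata collection via DecodeMetadata, under the same import-path assumption: keys that decode successfully land in Metadata.Keys, while keys with no matching field land in Metadata.Unused.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	var out struct {
		Name string `mapstructure:"name"`
	}
	var md mapstructure.Metadata
	input := map[string]interface{}{"name": "gopher", "color": "blue"}
	if err := mapstructure.DecodeMetadata(input, &out, &md); err != nil {
		panic(err)
	}
	fmt.Println(md.Keys)   // [name]
	fmt.Println(md.Unused) // [color]
}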
-// NewDecoder returns a new decoder for the given configuration. Once
-// a decoder has been returned, the same configuration must not be used
-// again.
-func NewDecoder(config *DecoderConfig) (*Decoder, error) {
- val := reflect.ValueOf(config.Result)
- if val.Kind() != reflect.Ptr {
- return nil, errors.New("result must be a pointer")
- }
-
- val = val.Elem()
- if !val.CanAddr() {
- return nil, errors.New("result must be addressable (a pointer)")
- }
-
- if config.Metadata != nil {
- if config.Metadata.Keys == nil {
- config.Metadata.Keys = make([]string, 0)
- }
-
- if config.Metadata.Unused == nil {
- config.Metadata.Unused = make([]string, 0)
- }
- }
-
- if config.TagName == "" {
- config.TagName = "mapstructure"
- }
-
- result := &Decoder{
- config: config,
- }
-
- return result, nil
-}
-
-// Decode decodes the given raw interface to the target pointer specified
-// by the configuration.
-func (d *Decoder) Decode(input interface{}) error {
- return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
-}
-
-// Decodes an unknown data type into a specific reflection value.
-func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
- var inputVal reflect.Value
- if input != nil {
- inputVal = reflect.ValueOf(input)
-
- // We need to check here if input is a typed nil. Typed nils won't
- // match the "input == nil" below so we check that here.
- if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
- input = nil
- }
- }
-
- if input == nil {
- // If the data is nil, then we don't set anything, unless ZeroFields is set
- // to true.
- if d.config.ZeroFields {
- outVal.Set(reflect.Zero(outVal.Type()))
-
- if d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
- }
- return nil
- }
-
- if !inputVal.IsValid() {
- // If the input value is invalid, then we just set the value
- // to be the zero value.
- outVal.Set(reflect.Zero(outVal.Type()))
- if d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
- return nil
- }
-
- if d.config.DecodeHook != nil {
- // We have a DecodeHook, so let's pre-process the input.
- var err error
- input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
- if err != nil {
- return fmt.Errorf("error decoding '%s': %s", name, err)
- }
- }
-
- var err error
- outputKind := getKind(outVal)
- addMetaKey := true
- switch outputKind {
- case reflect.Bool:
- err = d.decodeBool(name, input, outVal)
- case reflect.Interface:
- err = d.decodeBasic(name, input, outVal)
- case reflect.String:
- err = d.decodeString(name, input, outVal)
- case reflect.Int:
- err = d.decodeInt(name, input, outVal)
- case reflect.Uint:
- err = d.decodeUint(name, input, outVal)
- case reflect.Float32:
- err = d.decodeFloat(name, input, outVal)
- case reflect.Struct:
- err = d.decodeStruct(name, input, outVal)
- case reflect.Map:
- err = d.decodeMap(name, input, outVal)
- case reflect.Ptr:
- addMetaKey, err = d.decodePtr(name, input, outVal)
- case reflect.Slice:
- err = d.decodeSlice(name, input, outVal)
- case reflect.Array:
- err = d.decodeArray(name, input, outVal)
- case reflect.Func:
- err = d.decodeFunc(name, input, outVal)
- default:
- // If we reached this point then we weren't able to decode it
- return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
- }
-
- // If we reached here, then we successfully decoded SOMETHING, so
-	// mark the key as used if we're tracking metadata.
- if addMetaKey && d.config.Metadata != nil && name != "" {
- d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
- }
-
- return err
-}
-
-// This decodes a basic type (bool, int, string, etc.) and sets the
-// value to "data" of that type.
-func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
- if val.IsValid() && val.Elem().IsValid() {
- elem := val.Elem()
-
-		// If we can't address this element, then it's not writable. Instead,
- // we make a copy of the value (which is a pointer and therefore
- // writable), decode into that, and replace the whole value.
- copied := false
- if !elem.CanAddr() {
- copied = true
-
- // Make *T
- copy := reflect.New(elem.Type())
-
- // *T = elem
- copy.Elem().Set(elem)
-
- // Set elem so we decode into it
- elem = copy
- }
-
- // Decode. If we have an error then return. We also return right
- // away if we're not a copy because that means we decoded directly.
- if err := d.decode(name, data, elem); err != nil || !copied {
- return err
- }
-
-		// If we're a copy, we need to set the final result
- val.Set(elem.Elem())
- return nil
- }
-
- dataVal := reflect.ValueOf(data)
-
- // If the input data is a pointer, and the assigned type is the dereference
- // of that exact pointer, then indirect it so that we can assign it.
- // Example: *string to string
- if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
- dataVal = reflect.Indirect(dataVal)
- }
-
- if !dataVal.IsValid() {
- dataVal = reflect.Zero(val.Type())
- }
-
- dataValType := dataVal.Type()
- if !dataValType.AssignableTo(val.Type()) {
- return fmt.Errorf(
- "'%s' expected type '%s', got '%s'",
- name, val.Type(), dataValType)
- }
-
- val.Set(dataVal)
- return nil
-}
-
-func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
-
- converted := true
- switch {
- case dataKind == reflect.String:
- val.SetString(dataVal.String())
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetString("1")
- } else {
- val.SetString("0")
- }
- case dataKind == reflect.Int && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatInt(dataVal.Int(), 10))
- case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
- case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
- val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
- case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
- dataKind == reflect.Array && d.config.WeaklyTypedInput:
- dataType := dataVal.Type()
- elemKind := dataType.Elem().Kind()
- switch elemKind {
- case reflect.Uint8:
- var uints []uint8
- if dataKind == reflect.Array {
- uints = make([]uint8, dataVal.Len(), dataVal.Len())
- for i := range uints {
- uints[i] = dataVal.Index(i).Interface().(uint8)
- }
- } else {
- uints = dataVal.Interface().([]uint8)
- }
- val.SetString(string(uints))
- default:
- converted = false
- }
- default:
- converted = false
- }
-
- if !converted {
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
- name, val.Type(), dataVal.Type(), data)
- }
-
- return nil
-}
-
-func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
- dataType := dataVal.Type()
-
- switch {
- case dataKind == reflect.Int:
- val.SetInt(dataVal.Int())
- case dataKind == reflect.Uint:
- val.SetInt(int64(dataVal.Uint()))
- case dataKind == reflect.Float32:
- val.SetInt(int64(dataVal.Float()))
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetInt(1)
- } else {
- val.SetInt(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- str := dataVal.String()
- if str == "" {
- str = "0"
- }
-
- i, err := strconv.ParseInt(str, 0, val.Type().Bits())
- if err == nil {
- val.SetInt(i)
- } else {
- return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
- }
- case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
- jn := data.(json.Number)
- i, err := jn.Int64()
- if err != nil {
- return fmt.Errorf(
- "error decoding json.Number into %s: %s", name, err)
- }
- val.SetInt(i)
- default:
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
- name, val.Type(), dataVal.Type(), data)
- }
-
- return nil
-}
-
-func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
- dataType := dataVal.Type()
-
- switch {
- case dataKind == reflect.Int:
- i := dataVal.Int()
- if i < 0 && !d.config.WeaklyTypedInput {
- return fmt.Errorf("cannot parse '%s', %d overflows uint",
- name, i)
- }
- val.SetUint(uint64(i))
- case dataKind == reflect.Uint:
- val.SetUint(dataVal.Uint())
- case dataKind == reflect.Float32:
- f := dataVal.Float()
- if f < 0 && !d.config.WeaklyTypedInput {
- return fmt.Errorf("cannot parse '%s', %f overflows uint",
- name, f)
- }
- val.SetUint(uint64(f))
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetUint(1)
- } else {
- val.SetUint(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- str := dataVal.String()
- if str == "" {
- str = "0"
- }
-
- i, err := strconv.ParseUint(str, 0, val.Type().Bits())
- if err == nil {
- val.SetUint(i)
- } else {
- return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
- }
- case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
- jn := data.(json.Number)
- i, err := jn.Int64()
- if err != nil {
- return fmt.Errorf(
- "error decoding json.Number into %s: %s", name, err)
- }
- if i < 0 && !d.config.WeaklyTypedInput {
- return fmt.Errorf("cannot parse '%s', %d overflows uint",
- name, i)
- }
- val.SetUint(uint64(i))
- default:
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
- name, val.Type(), dataVal.Type(), data)
- }
-
- return nil
-}
-
-func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
-
- switch {
- case dataKind == reflect.Bool:
- val.SetBool(dataVal.Bool())
- case dataKind == reflect.Int && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Int() != 0)
- case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Uint() != 0)
- case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
- val.SetBool(dataVal.Float() != 0)
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- b, err := strconv.ParseBool(dataVal.String())
- if err == nil {
- val.SetBool(b)
- } else if dataVal.String() == "" {
- val.SetBool(false)
- } else {
- return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
- }
- default:
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
- name, val.Type(), dataVal.Type(), data)
- }
-
- return nil
-}
-
-func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataKind := getKind(dataVal)
- dataType := dataVal.Type()
-
- switch {
- case dataKind == reflect.Int:
- val.SetFloat(float64(dataVal.Int()))
- case dataKind == reflect.Uint:
- val.SetFloat(float64(dataVal.Uint()))
- case dataKind == reflect.Float32:
- val.SetFloat(dataVal.Float())
- case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
- if dataVal.Bool() {
- val.SetFloat(1)
- } else {
- val.SetFloat(0)
- }
- case dataKind == reflect.String && d.config.WeaklyTypedInput:
- str := dataVal.String()
- if str == "" {
- str = "0"
- }
-
- f, err := strconv.ParseFloat(str, val.Type().Bits())
- if err == nil {
- val.SetFloat(f)
- } else {
- return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
- }
- case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
- jn := data.(json.Number)
- i, err := jn.Float64()
- if err != nil {
- return fmt.Errorf(
- "error decoding json.Number into %s: %s", name, err)
- }
- val.SetFloat(i)
- default:
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
- name, val.Type(), dataVal.Type(), data)
- }
-
- return nil
-}
-
-func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
- valType := val.Type()
- valKeyType := valType.Key()
- valElemType := valType.Elem()
-
- // By default we overwrite keys in the current map
- valMap := val
-
- // If the map is nil or we're purposely zeroing fields, make a new map
- if valMap.IsNil() || d.config.ZeroFields {
- // Make a new map to hold our result
- mapType := reflect.MapOf(valKeyType, valElemType)
- valMap = reflect.MakeMap(mapType)
- }
-
- // Check input type and based on the input type jump to the proper func
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- switch dataVal.Kind() {
- case reflect.Map:
- return d.decodeMapFromMap(name, dataVal, val, valMap)
-
- case reflect.Struct:
- return d.decodeMapFromStruct(name, dataVal, val, valMap)
-
- case reflect.Array, reflect.Slice:
- if d.config.WeaklyTypedInput {
- return d.decodeMapFromSlice(name, dataVal, val, valMap)
- }
-
- fallthrough
-
- default:
- return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
- }
-}
-
-func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- // Special case for BC reasons (covered by tests)
- if dataVal.Len() == 0 {
- val.Set(valMap)
- return nil
- }
-
- for i := 0; i < dataVal.Len(); i++ {
- err := d.decode(
- name+"["+strconv.Itoa(i)+"]",
- dataVal.Index(i).Interface(), val)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- valType := val.Type()
- valKeyType := valType.Key()
- valElemType := valType.Elem()
-
- // Accumulate errors
- errors := make([]string, 0)
-
- // If the input data is empty, then we just match what the input data is.
- if dataVal.Len() == 0 {
- if dataVal.IsNil() {
- if !val.IsNil() {
- val.Set(dataVal)
- }
- } else {
- // Set to empty allocated value
- val.Set(valMap)
- }
-
- return nil
- }
-
- for _, k := range dataVal.MapKeys() {
- fieldName := name + "[" + k.String() + "]"
-
- // First decode the key into the proper type
- currentKey := reflect.Indirect(reflect.New(valKeyType))
- if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
- errors = appendErrors(errors, err)
- continue
- }
-
- // Next decode the data into the proper type
- v := dataVal.MapIndex(k).Interface()
- currentVal := reflect.Indirect(reflect.New(valElemType))
- if err := d.decode(fieldName, v, currentVal); err != nil {
- errors = appendErrors(errors, err)
- continue
- }
-
- valMap.SetMapIndex(currentKey, currentVal)
- }
-
- // Set the built up map to the value
- val.Set(valMap)
-
- // If we had errors, return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
-}
-
-func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
- typ := dataVal.Type()
- for i := 0; i < typ.NumField(); i++ {
- // Get the StructField first since this is a cheap operation. If the
- // field is unexported, then ignore it.
- f := typ.Field(i)
- if f.PkgPath != "" {
- continue
- }
-
- // Next get the actual value of this field and verify it is assignable
- // to the map value.
- v := dataVal.Field(i)
- if !v.Type().AssignableTo(valMap.Type().Elem()) {
- return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
- }
-
- tagValue := f.Tag.Get(d.config.TagName)
- keyName := f.Name
-
- // If Squash is set in the config, we squash the field down.
- squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
-
- // Determine the name of the key in the map
- if index := strings.Index(tagValue, ","); index != -1 {
- if tagValue[:index] == "-" {
- continue
- }
- // If "omitempty" is specified in the tag, it ignores empty values.
- if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
- continue
- }
-
- // If "squash" is specified in the tag, we squash the field down.
- squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1
- if squash {
- // When squashing, the embedded type can be a pointer to a struct.
- if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
- v = v.Elem()
- }
-
- // The final type must be a struct
- if v.Kind() != reflect.Struct {
- return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
- }
- }
- keyName = tagValue[:index]
- } else if len(tagValue) > 0 {
- if tagValue == "-" {
- continue
- }
- keyName = tagValue
- }
-
- switch v.Kind() {
- // this is an embedded struct, so handle it differently
- case reflect.Struct:
- x := reflect.New(v.Type())
- x.Elem().Set(v)
-
- vType := valMap.Type()
- vKeyType := vType.Key()
- vElemType := vType.Elem()
- mType := reflect.MapOf(vKeyType, vElemType)
- vMap := reflect.MakeMap(mType)
-
- // Creating a pointer to a map so that other methods can completely
- // overwrite the map if need be (looking at you decodeMapFromMap). The
- // indirection allows the underlying map to be settable (CanSet() == true)
-			// whereas reflect.MakeMap returns an unsettable map.
- addrVal := reflect.New(vMap.Type())
- reflect.Indirect(addrVal).Set(vMap)
-
- err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal))
- if err != nil {
- return err
- }
-
- // the underlying map may have been completely overwritten so pull
- // it indirectly out of the enclosing value.
- vMap = reflect.Indirect(addrVal)
-
- if squash {
- for _, k := range vMap.MapKeys() {
- valMap.SetMapIndex(k, vMap.MapIndex(k))
- }
- } else {
- valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
- }
-
- default:
- valMap.SetMapIndex(reflect.ValueOf(keyName), v)
- }
- }
-
- if val.CanAddr() {
- val.Set(valMap)
- }
-
- return nil
-}
-
-func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) {
- // If the input data is nil, then we want to just set the output
- // pointer to be nil as well.
- isNil := data == nil
- if !isNil {
- switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
- case reflect.Chan,
- reflect.Func,
- reflect.Interface,
- reflect.Map,
- reflect.Ptr,
- reflect.Slice:
- isNil = v.IsNil()
- }
- }
- if isNil {
- if !val.IsNil() && val.CanSet() {
- nilValue := reflect.New(val.Type()).Elem()
- val.Set(nilValue)
- }
-
- return true, nil
- }
-
- // Create an element of the concrete (non pointer) type and decode
- // into that. Then set the value of the pointer to this type.
- valType := val.Type()
- valElemType := valType.Elem()
- if val.CanSet() {
- realVal := val
- if realVal.IsNil() || d.config.ZeroFields {
- realVal = reflect.New(valElemType)
- }
-
- if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
- return false, err
- }
-
- val.Set(realVal)
- } else {
- if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
- return false, err
- }
- }
- return false, nil
-}
-
-func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
- // Create an element of the concrete (non pointer) type and decode
- // into that. Then set the value of the pointer to this type.
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- if val.Type() != dataVal.Type() {
- return fmt.Errorf(
- "'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
- name, val.Type(), dataVal.Type(), data)
- }
- val.Set(dataVal)
- return nil
-}
-
-func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataValKind := dataVal.Kind()
- valType := val.Type()
- valElemType := valType.Elem()
- sliceType := reflect.SliceOf(valElemType)
-
- // If we have a non array/slice type then we first attempt to convert.
- if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- if d.config.WeaklyTypedInput {
- switch {
- // Slice and array we use the normal logic
- case dataValKind == reflect.Slice, dataValKind == reflect.Array:
- break
-
- // Empty maps turn into empty slices
- case dataValKind == reflect.Map:
- if dataVal.Len() == 0 {
- val.Set(reflect.MakeSlice(sliceType, 0, 0))
- return nil
- }
- // Create slice of maps of other sizes
- return d.decodeSlice(name, []interface{}{data}, val)
-
- case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
- return d.decodeSlice(name, []byte(dataVal.String()), val)
-
- // All other types we try to convert to the slice type
- // and "lift" it into it. i.e. a string becomes a string slice.
- default:
- // Just re-try this function with data as a slice.
- return d.decodeSlice(name, []interface{}{data}, val)
- }
- }
-
- return fmt.Errorf(
- "'%s': source data must be an array or slice, got %s", name, dataValKind)
- }
-
- // If the input value is nil, then don't allocate since empty != nil
- if dataVal.IsNil() {
- return nil
- }
-
- valSlice := val
- if valSlice.IsNil() || d.config.ZeroFields {
- // Make a new slice to hold our result, same size as the original data.
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
- }
-
- // Accumulate any errors
- errors := make([]string, 0)
-
- for i := 0; i < dataVal.Len(); i++ {
- currentData := dataVal.Index(i).Interface()
- for valSlice.Len() <= i {
- valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
- }
- currentField := valSlice.Index(i)
-
- fieldName := name + "[" + strconv.Itoa(i) + "]"
- if err := d.decode(fieldName, currentData, currentField); err != nil {
- errors = appendErrors(errors, err)
- }
- }
-
- // Finally, set the value to the slice we built up
- val.Set(valSlice)
-
- // If there were errors, we return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
-}
-
-func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
- dataValKind := dataVal.Kind()
- valType := val.Type()
- valElemType := valType.Elem()
- arrayType := reflect.ArrayOf(valType.Len(), valElemType)
-
- valArray := val
-
- if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
- // Check input type
- if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- if d.config.WeaklyTypedInput {
- switch {
- // Empty maps turn into empty arrays
- case dataValKind == reflect.Map:
- if dataVal.Len() == 0 {
- val.Set(reflect.Zero(arrayType))
- return nil
- }
-
- // All other types we try to convert to the array type
- // and "lift" it into it. i.e. a string becomes a string array.
- default:
- // Just re-try this function with data as a slice.
- return d.decodeArray(name, []interface{}{data}, val)
- }
- }
-
- return fmt.Errorf(
- "'%s': source data must be an array or slice, got %s", name, dataValKind)
-
- }
- if dataVal.Len() > arrayType.Len() {
- return fmt.Errorf(
- "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
-
- }
-
- // Make a new array to hold our result, same size as the original data.
- valArray = reflect.New(arrayType).Elem()
- }
-
- // Accumulate any errors
- errors := make([]string, 0)
-
- for i := 0; i < dataVal.Len(); i++ {
- currentData := dataVal.Index(i).Interface()
- currentField := valArray.Index(i)
-
- fieldName := name + "[" + strconv.Itoa(i) + "]"
- if err := d.decode(fieldName, currentData, currentField); err != nil {
- errors = appendErrors(errors, err)
- }
- }
-
- // Finally, set the value to the array we built up
- val.Set(valArray)
-
- // If there were errors, we return those
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- return nil
-}
-
-func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
- dataVal := reflect.Indirect(reflect.ValueOf(data))
-
- // If the type of the value to write to and the data match directly,
- // then we just set it directly instead of recursing into the structure.
- if dataVal.Type() == val.Type() {
- val.Set(dataVal)
- return nil
- }
-
- dataValKind := dataVal.Kind()
- switch dataValKind {
- case reflect.Map:
- return d.decodeStructFromMap(name, dataVal, val)
-
- case reflect.Struct:
- // Not the most efficient way to do this but we can optimize later if
- // we want to. To convert from struct to struct we go to map first
- // as an intermediary.
-
- // Make a new map to hold our result
- mapType := reflect.TypeOf((map[string]interface{})(nil))
- mval := reflect.MakeMap(mapType)
-
- // Creating a pointer to a map so that other methods can completely
- // overwrite the map if need be (looking at you decodeMapFromMap). The
- // indirection allows the underlying map to be settable (CanSet() == true)
-		// whereas reflect.MakeMap returns an unsettable map.
- addrVal := reflect.New(mval.Type())
-
- reflect.Indirect(addrVal).Set(mval)
- if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil {
- return err
- }
-
- result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val)
- return result
-
- default:
- return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
- }
-}
-
-func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
- dataValType := dataVal.Type()
- if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
- return fmt.Errorf(
- "'%s' needs a map with string keys, has '%s' keys",
- name, dataValType.Key().Kind())
- }
-
- dataValKeys := make(map[reflect.Value]struct{})
- dataValKeysUnused := make(map[interface{}]struct{})
- for _, dataValKey := range dataVal.MapKeys() {
- dataValKeys[dataValKey] = struct{}{}
- dataValKeysUnused[dataValKey.Interface()] = struct{}{}
- }
-
- errors := make([]string, 0)
-
- // This slice will keep track of all the structs we'll be decoding.
- // There can be more than one struct if there are embedded structs
- // that are squashed.
- structs := make([]reflect.Value, 1, 5)
- structs[0] = val
-
- // Compile the list of all the fields that we're going to be decoding
- // from all the structs.
- type field struct {
- field reflect.StructField
- val reflect.Value
- }
-
- // remainField is set to a valid field set with the "remain" tag if
- // we are keeping track of remaining values.
- var remainField *field
-
- fields := []field{}
- for len(structs) > 0 {
- structVal := structs[0]
- structs = structs[1:]
-
- structType := structVal.Type()
-
- for i := 0; i < structType.NumField(); i++ {
- fieldType := structType.Field(i)
- fieldVal := structVal.Field(i)
- if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct {
- // Handle embedded struct pointers as embedded structs.
- fieldVal = fieldVal.Elem()
- }
-
- // If "squash" is specified in the tag, we squash the field down.
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous
- remain := false
-
-			// We always parse the tags because we're looking for other tags too
- tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
- for _, tag := range tagParts[1:] {
- if tag == "squash" {
- squash = true
- break
- }
-
- if tag == "remain" {
- remain = true
- break
- }
- }
-
- if squash {
- if fieldVal.Kind() != reflect.Struct {
- errors = appendErrors(errors,
- fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
- } else {
- structs = append(structs, fieldVal)
- }
- continue
- }
-
- // Build our field
- if remain {
- remainField = &field{fieldType, fieldVal}
- } else {
- // Normal struct field, store it away
- fields = append(fields, field{fieldType, fieldVal})
- }
- }
- }
-
- // for fieldType, field := range fields {
- for _, f := range fields {
- field, fieldValue := f.field, f.val
- fieldName := field.Name
-
- tagValue := field.Tag.Get(d.config.TagName)
- tagValue = strings.SplitN(tagValue, ",", 2)[0]
- if tagValue != "" {
- fieldName = tagValue
- }
-
- rawMapKey := reflect.ValueOf(fieldName)
- rawMapVal := dataVal.MapIndex(rawMapKey)
- if !rawMapVal.IsValid() {
- // Do a slower search by iterating over each key and
- // doing case-insensitive search.
- for dataValKey := range dataValKeys {
- mK, ok := dataValKey.Interface().(string)
- if !ok {
- // Not a string key
- continue
- }
-
- if strings.EqualFold(mK, fieldName) {
- rawMapKey = dataValKey
- rawMapVal = dataVal.MapIndex(dataValKey)
- break
- }
- }
-
- if !rawMapVal.IsValid() {
- // There was no matching key in the map for the value in
- // the struct. Just ignore.
- continue
- }
- }
-
- if !fieldValue.IsValid() {
- // This should never happen
- panic("field is not valid")
- }
-
- // If we can't set the field, then it is unexported or something,
- // and we just continue onwards.
- if !fieldValue.CanSet() {
- continue
- }
-
- // Delete the key we're using from the unused map so we stop tracking
- delete(dataValKeysUnused, rawMapKey.Interface())
-
- // If the name is empty string, then we're at the root, and we
- // don't dot-join the fields.
- if name != "" {
- fieldName = name + "." + fieldName
- }
-
- if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
- errors = appendErrors(errors, err)
- }
- }
-
- // If we have a "remain"-tagged field and we have unused keys then
- // we put the unused keys directly into the remain field.
- if remainField != nil && len(dataValKeysUnused) > 0 {
- // Build a map of only the unused values
- remain := map[interface{}]interface{}{}
- for key := range dataValKeysUnused {
- remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface()
- }
-
- // Decode it as-if we were just decoding this map onto our map.
- if err := d.decodeMap(name, remain, remainField.val); err != nil {
- errors = appendErrors(errors, err)
- }
-
- // Set the map to nil so we have none so that the next check will
- // not error (ErrorUnused)
- dataValKeysUnused = nil
- }
-
- if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
- keys := make([]string, 0, len(dataValKeysUnused))
- for rawKey := range dataValKeysUnused {
- keys = append(keys, rawKey.(string))
- }
- sort.Strings(keys)
-
- err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
- errors = appendErrors(errors, err)
- }
-
- if len(errors) > 0 {
- return &Error{errors}
- }
-
- // Add the unused keys to the list of unused keys if we're tracking metadata
- if d.config.Metadata != nil {
- for rawKey := range dataValKeysUnused {
- key := rawKey.(string)
- if name != "" {
- key = name + "." + key
- }
-
- d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
- }
- }
-
- return nil
-}
-
-func isEmptyValue(v reflect.Value) bool {
- switch getKind(v) {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
- return false
-}
-
-func getKind(val reflect.Value) reflect.Kind {
- kind := val.Kind()
-
- switch {
- case kind >= reflect.Int && kind <= reflect.Int64:
- return reflect.Int
- case kind >= reflect.Uint && kind <= reflect.Uint64:
- return reflect.Uint
- case kind >= reflect.Float32 && kind <= reflect.Float64:
- return reflect.Float32
- default:
- return kind
- }
-}
diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
index cb72bd6f2b..fea67526e0 100644
--- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -1,3 +1,32 @@
+## 2.13.0
+
+### Features
+
+Add PreviewSpecs() to enable programmatic preview access to the suite report (fixes #1225)
+
+## 2.12.1
+
+### Fixes
+- Print logr prefix if it exists (#1275) [90d4846]
+
+### Maintenance
+- Bump actions/checkout from 3 to 4 (#1271) [555f543]
+- Bump golang.org/x/sys from 0.11.0 to 0.12.0 (#1270) [d867b7d]
+
+## 2.12.0
+
+### Features
+
+- feat: allow MustPassRepeatedly decorator to be set at suite level (#1266) [05de518]
+
+### Fixes
+
+- fix-errors-in-readme (#1244) [27c2f5d]
+
+### Maintenance
+
+Various chores/dependency bumps.
+
## 2.11.0
In prior versions of Ginkgo specs the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. This behavior has proved surprising and confusing in at least the following ways:
diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md
index d0473a467c..cb23ffdf6a 100644
--- a/vendor/github.com/onsi/ginkgo/v2/README.md
+++ b/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -15,7 +15,7 @@ import (
...
)
-Describe("Checking books out of the library", Label("library"), func() {
+var _ = Describe("Checking books out of the library", Label("library"), func() {
var library *libraries.Library
var book *books.Book
var valjean *users.User
@@ -50,7 +50,7 @@ Describe("Checking books out of the library", Label("library"), func() {
It("tells the user", func(ctx SpecContext) {
err := valjean.Checkout(ctx, library, "Les Miserables")
- Expect(error).To(MatchError("Les Miserables is currently checked out"))
+ Expect(err).To(MatchError("Les Miserables is currently checked out"))
}, SpecTimeout(time.Second * 5))
It("lets the user place a hold and get notified later", func(ctx SpecContext) {
@@ -74,7 +74,7 @@ Describe("Checking books out of the library", Label("library"), func() {
When("the library does not have the book in question", func() {
It("tells the reader the book is unavailable", func(ctx SpecContext) {
err := valjean.Checkout(ctx, library, "Les Miserables")
- Expect(error).To(MatchError("Les Miserables is not in the library catalog"))
+ Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
}, SpecTimeout(time.Second * 5))
})
})
diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
index a244bdc180..2d7a70eccb 100644
--- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -248,31 +248,13 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
exitIfErr(types.GinkgoErrors.RerunningSuite())
}
suiteDidRun = true
-
- suiteLabels := Labels{}
- configErrors := []error{}
- for _, arg := range args {
- switch arg := arg.(type) {
- case types.SuiteConfig:
- suiteConfig = arg
- case types.ReporterConfig:
- reporterConfig = arg
- case Labels:
- suiteLabels = append(suiteLabels, arg...)
- default:
- configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
- }
+ err := global.PushClone()
+ if err != nil {
+ exitIfErr(err)
}
- exitIfErrors(configErrors)
+ defer global.PopClone()
- configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig)
- if len(configErrors) > 0 {
- fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n"))
- for _, err := range configErrors {
- fmt.Fprintf(formatter.ColorableStdErr, err.Error())
- }
- os.Exit(1)
- }
+ suiteLabels := extractSuiteConfiguration(args)
var reporter reporters.Reporter
if suiteConfig.ParallelTotal == 1 {
@@ -308,9 +290,8 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig)
}
- err := global.Suite.BuildTree()
+ err = global.Suite.BuildTree()
exitIfErr(err)
-
suitePath, err := os.Getwd()
exitIfErr(err)
suitePath, err = filepath.Abs(suitePath)
@@ -335,6 +316,69 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
return passed
}
+func extractSuiteConfiguration(args []interface{}) Labels {
+ suiteLabels := Labels{}
+ configErrors := []error{}
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case types.SuiteConfig:
+ suiteConfig = arg
+ case types.ReporterConfig:
+ reporterConfig = arg
+ case Labels:
+ suiteLabels = append(suiteLabels, arg...)
+ default:
+ configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
+ }
+ }
+ exitIfErrors(configErrors)
+
+ configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig)
+ if len(configErrors) > 0 {
+ fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n"))
+ for _, err := range configErrors {
+ fmt.Fprintf(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+
+ return suiteLabels
+}
+
+/*
+PreviewSpecs walks the testing tree and produces a report without actually invoking the specs.
+See http://onsi.github.io/ginkgo/#previewing-specs for more information.
+*/
+func PreviewSpecs(description string, args ...any) Report {
+ err := global.PushClone()
+ if err != nil {
+ exitIfErr(err)
+ }
+ defer global.PopClone()
+
+ suiteLabels := extractSuiteConfiguration(args)
+ priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess
+ suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1
+ defer func() {
+ suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = priorDryRun, priorParallelTotal, priorParallelProcess
+ }()
+ reporter := reporters.NoopReporter{}
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ client = nil
+ writer := GinkgoWriter.(*internal.Writer)
+
+ err = global.Suite.BuildTree()
+ exitIfErr(err)
+ suitePath, err := os.Getwd()
+ exitIfErr(err)
+ suitePath, err = filepath.Abs(suitePath)
+ exitIfErr(err)
+
+ global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+
+ return global.Suite.GetPreviewReport()
+}
+
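A hedged sketch of how the new PreviewSpecs entry point might be used from a test binary; the suite and spec names are illustrative, and the usual import path github.com/onsi/ginkgo/v2 is assumed.

package books_test

import (
	"fmt"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestBooks(t *testing.T) {
	gomega.RegisterFailHandler(Fail)

	// Walk the tree without running any specs and print what would run.
	report := PreviewSpecs("Books Suite")
	for _, spec := range report.SpecReports {
		fmt.Println(spec.FullText())
	}

	RunSpecs(t, "Books Suite")
}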
/*
Skip instructs Ginkgo to skip the current spec
diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
index 0b9b19fe74..958daccbfa 100644
--- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
+++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
@@ -244,9 +244,7 @@ func labelFromCallExpr(ce *ast.CallExpr) []string {
}
if id.Name == "Label" {
ls := extractLabels(expr)
- for _, label := range ls {
- labels = append(labels, label)
- }
+ labels = append(labels, ls...)
}
}
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
index f2c0fd89c0..464e3c97ff 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
@@ -6,6 +6,7 @@ import (
var Suite *internal.Suite
var Failer *internal.Failer
+var backupSuite *internal.Suite
func init() {
InitializeGlobals()
@@ -15,3 +16,13 @@ func InitializeGlobals() {
Failer = internal.NewFailer()
Suite = internal.NewSuite()
}
+
+func PushClone() error {
+ var err error
+ backupSuite, err = Suite.Clone()
+ return err
+}
+
+func PopClone() {
+ Suite = backupSuite
+}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
index ae1b7b0112..02c9fe4fcd 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -321,7 +321,10 @@ func (g *group) run(specs Specs) {
if !skip {
var maxAttempts = 1
- if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ if g.suite.config.MustPassRepeatedly > 0 {
+ maxAttempts = g.suite.config.MustPassRepeatedly
+ g.suite.currentSpecReport.MaxMustPassRepeatedly = maxAttempts
+ } else if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
maxAttempts = max(1, spec.MustPassRepeatedly())
} else if g.suite.config.FlakeAttempts > 0 {
maxAttempts = g.suite.config.FlakeAttempts
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
index 14c7cf54ed..16f0dc2278 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -597,12 +597,16 @@ func (n Node) IsZero() bool {
/* Nodes */
type Nodes []Node
+func (n Nodes) Clone() Nodes {
+ nodes := make(Nodes, len(n))
+ copy(nodes, n)
+ return nodes
+}
+
func (n Nodes) CopyAppend(nodes ...Node) Nodes {
numN := len(n)
out := make(Nodes, numN+len(nodes))
- for i, node := range n {
- out[i] = node
- }
+ copy(out, n)
for j, node := range nodes {
out[numN+j] = node
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
index ea0d259d92..fe6e8288ad 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -77,6 +77,20 @@ func NewSuite() *Suite {
}
}
+func (suite *Suite) Clone() (*Suite, error) {
+ if suite.phase != PhaseBuildTopLevel {
+		return nil, fmt.Errorf("cannot clone suite after tree has been built")
+ }
+ return &Suite{
+ tree: &TreeNode{},
+ phase: PhaseBuildTopLevel,
+ ProgressReporterManager: NewProgressReporterManager(),
+ topLevelContainers: suite.topLevelContainers.Clone(),
+ suiteNodes: suite.suiteNodes.Clone(),
+ selectiveLock: &sync.Mutex{},
+ }, nil
+}
+
func (suite *Suite) BuildTree() error {
	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers and entered
// We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree
@@ -328,6 +342,16 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport {
return report
}
+// Only valid in the preview context. In general suite.report only includes
+// the specs run by _this_ node - it is only at the end of the suite that
+// the parallel reports are aggregated. However in the preview context we run
+// in series and the report is already complete.
+func (suite *Suite) GetPreviewReport() types.Report {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+ return suite.report
+}
+
func (suite *Suite) AddReportEntry(entry ReportEntry) error {
if suite.phase != PhaseRun {
return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
index 574f172df3..aab42d5fb3 100644
--- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
+++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -135,6 +135,10 @@ func (w *Writer) Println(a ...interface{}) {
func GinkgoLogrFunc(writer *Writer) logr.Logger {
return funcr.New(func(prefix, args string) {
- writer.Printf("%s\n", args)
+ if prefix == "" {
+ writer.Printf("%s\n", args)
+ } else {
+ writer.Printf("%s %s\n", prefix, args)
+ }
}, funcr.Options{})
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go
index 1014c7b49f..c88fc85a75 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/config.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -27,6 +27,7 @@ type SuiteConfig struct {
FailOnPending bool
FailFast bool
FlakeAttempts int
+ MustPassRepeatedly int
DryRun bool
PollProgressAfter time.Duration
PollProgressInterval time.Duration
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
index 1e0dbfd9df..4fbdc3e9b1 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -453,8 +453,8 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
return GinkgoError{
- Heading: fmt.Sprintf("No parameters have been passed to the Table Function"),
- Message: fmt.Sprintf("The Table Function expected at least 1 parameter"),
+ Heading: "No parameters have been passed to the Table Function",
+ Message: "The Table Function expected at least 1 parameter",
CodeLocation: cl,
DocLink: "table-specs",
}
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go
index d048a8adab..aae69b04c9 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/types.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -97,9 +97,7 @@ func (report Report) Add(other Report) Report {
report.RunTime = report.EndTime.Sub(report.StartTime)
reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
- for i := range report.SpecReports {
- reports[i] = report.SpecReports[i]
- }
+ copy(reports, report.SpecReports)
offset := len(report.SpecReports)
for i := range other.SpecReports {
reports[i+offset] = other.SpecReports[i]
diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go
index f895739b83..a37f308286 100644
--- a/vendor/github.com/onsi/ginkgo/v2/types/version.go
+++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -1,3 +1,3 @@
package types
-const VERSION = "2.11.0"
+const VERSION = "2.13.0"
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 9b83dd6d48..fbb12a157f 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,37 @@
+## 1.28.0
+
+### Features
+- Add VerifyHost handler to ghttp (#698) [0b03b36]
+
+### Fixes
+- Read Body for Newer Responses in HaveHTTPBodyMatcher (#686) [18d6673]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.11.0 to 2.12.0 (#693) [55a33f3]
+- Typo in matchers.go (#691) [de68e8f]
+- Bump commonmarker from 0.23.9 to 0.23.10 in /docs (#690) [ab17f5e]
+- chore: update test matrix for Go 1.21 (#689) [5069017]
+- Bump golang.org/x/net from 0.12.0 to 0.14.0 (#688) [babe25f]
+
+## 1.27.10
+
+### Fixes
+- fix: go 1.21 adding goroutine ID to creator+location (#685) [bdc7803]
+
+## 1.27.9
+
+### Fixes
+- Prevent nil-dereference in format.Object for boxed nil error (#681) [3b31fc3]
+
+### Maintenance
+- Bump golang.org/x/net from 0.11.0 to 0.12.0 (#679) [360849b]
+- chore: use String() instead of fmt.Sprintf (#678) [86f3659]
+- Bump golang.org/x/net from 0.10.0 to 0.11.0 (#674) [642ead0]
+- chore: unnecessary use of fmt.Sprintf (#677) [ceb9ca6]
+- Bump github.com/onsi/ginkgo/v2 from 2.10.0 to 2.11.0 (#675) [a2087d8]
+- docs: fix ContainSubstring references (#673) [fc9a89f]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.7 to 2.10.0 (#671) [9076019]
+
## 1.27.8
### Fixes
diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go
index 56bdd053bb..6c1680638b 100644
--- a/vendor/github.com/onsi/gomega/format/format.go
+++ b/vendor/github.com/onsi/gomega/format/format.go
@@ -259,7 +259,7 @@ func Object(object interface{}, indentation uint) string {
indent := strings.Repeat(Indent, int(indentation))
value := reflect.ValueOf(object)
commonRepresentation := ""
- if err, ok := object.(error); ok {
+ if err, ok := object.(error); ok && !isNilValue(value) { // isNilValue check needed here to avoid nil deref due to boxed nil
commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" + indent
}
return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation))
@@ -302,7 +302,7 @@ func formatType(v reflect.Value) string {
case reflect.Map:
return fmt.Sprintf("%s | len:%d", v.Type(), v.Len())
default:
- return fmt.Sprintf("%s", v.Type())
+ return v.Type().String()
}
}
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index bc7ec293de..675a17840e 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.27.8"
+const GOMEGA_VERSION = "1.28.0"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index b832f3dbaf..4c13ad0a7b 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -92,9 +92,9 @@ func Succeed() types.GomegaMatcher {
//
// These are valid use-cases:
//
-// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
-// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
-// Expect(err).Should(MatchError(ContainsSubstring("sprocket not found"))) // asserts that edrr.Error() contains substring "sprocket not found"
+// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
+// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
+// Expect(err).Should(MatchError(ContainSubstring("sprocket not found"))) // asserts that err.Error() contains substring "sprocket not found"
//
// It is an error for err to be nil or an object that does not implement the
// Error interface
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
index acffc8570f..93d4497c70 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
@@ -52,5 +52,5 @@ func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message
}
func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("not be a directory"))
+ return format.Message(actual, "not be a directory")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
index 89441c8003..8fefc4deb7 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
@@ -52,5 +52,5 @@ func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (messag
}
func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("not be a regular file"))
+ return format.Message(actual, "not be a regular file")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
index ec6506b001..e2bdd28113 100644
--- a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
+++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
@@ -32,9 +32,9 @@ func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool,
}
func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("to exist"))
+ return format.Message(actual, "to exist")
}
func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("not to exist"))
+ return format.Message(actual, "not to exist")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
index 6a3dcdc353..d14d9e5fc6 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -11,8 +11,9 @@ import (
)
type HaveHTTPBodyMatcher struct {
- Expected interface{}
- cachedBody []byte
+ Expected interface{}
+ cachedResponse interface{}
+ cachedBody []byte
}
func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
@@ -73,7 +74,7 @@ func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (m
// the Reader is closed and it is not readable again in FailureMessage()
// or NegatedFailureMessage()
func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
- if matcher.cachedBody != nil {
+ if matcher.cachedResponse == actual && matcher.cachedBody != nil {
return matcher.cachedBody, nil
}
@@ -91,8 +92,10 @@ func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
switch a := actual.(type) {
case *http.Response:
+ matcher.cachedResponse = a
return body(a)
case *httptest.ResponseRecorder:
+ matcher.cachedResponse = a
return body(a.Result())
default:
return nil, fmt.Errorf("HaveHTTPBody matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
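
Since the HaveHTTPBody change above keys the cached body on the response it was read from, a matcher instance can now be reused against different responses without returning stale bytes. A minimal sketch of the user-visible behaviour, assuming a standard gomega test (names are illustrative):

    package example_test

    import (
        "net/http/httptest"
        "testing"

        "github.com/onsi/gomega"
    )

    func TestBodyCacheFollowsResponse(t *testing.T) {
        g := gomega.NewWithT(t)

        rec1 := httptest.NewRecorder()
        rec1.WriteString("first")
        rec2 := httptest.NewRecorder()
        rec2.WriteString("second")

        // The cached body is tied to the response it came from, so matching the
        // same matcher against rec2 re-reads the body instead of reusing rec1's.
        m := gomega.HaveHTTPBody("second")
        g.Expect(rec1).NotTo(m)
        g.Expect(rec2).To(m)
    }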
diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go
index 5c0960d872..7286824d89 100644
--- a/vendor/github.com/pborman/uuid/time.go
+++ b/vendor/github.com/pborman/uuid/time.go
@@ -29,7 +29,7 @@ func GetTime() (Time, uint16, error) { return guuid.GetTime() }
// for
func ClockSequence() int { return guuid.ClockSequence() }
-// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) { guuid.SetClockSequence(seq) }
diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go
index b459d46d13..767dd0c3aa 100644
--- a/vendor/github.com/pborman/uuid/version4.go
+++ b/vendor/github.com/pborman/uuid/version4.go
@@ -6,7 +6,7 @@ package uuid
import guuid "github.com/google/uuid"
-// Random returns a Random (Version 4) UUID or panics.
+// NewRandom returns a Random (Version 4) UUID or panics.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
index 6c061712bb..7399e04bf6 100644
--- a/vendor/github.com/pelletier/go-toml/README.md
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -25,9 +25,9 @@ and [much faster][v2-bench]. If you only need reading and writing TOML documents
(majority of cases), those features are implemented and the API unlikely to
change.
-The remaining features (Document structure editing and tooling) will be added
-shortly. While pull-requests are welcome on v1, no active development is
-expected on it. When v2.0.0 is released, v1 will be deprecated.
+The remaining features will be added shortly. While pull-requests are welcome on
+v1, no active development is expected on it. When v2.0.0 is released, v1 will be
+deprecated.
👉 [go-toml v2][v2]
diff --git a/vendor/github.com/pelletier/go-toml/SECURITY.md b/vendor/github.com/pelletier/go-toml/SECURITY.md
new file mode 100644
index 0000000000..b2f21cfc92
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+Use this section to tell people about which versions of your project are
+currently being supported with security updates.
+
+| Version | Supported |
+| ---------- | ------------------ |
+| Latest 2.x | :white_check_mark: |
+| All 1.x | :x: |
+| All 0.x | :x: |
+
+## Reporting a Vulnerability
+
+Email a vulnerability report to `security@pelletier.codes`. Make sure to include
+as many details as possible to reproduce the vulnerability. This is a
+side-project: I will try to get back to you as quickly as possible, time
+permitting in my personal life. Providing a working patch helps very much!
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
index 3443c35452..5712730498 100644
--- a/vendor/github.com/pelletier/go-toml/marshal.go
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -1113,7 +1113,7 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
}
- if val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
+ if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
}
if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) {
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
index f5e1a44fb4..b3726d0dd8 100644
--- a/vendor/github.com/pelletier/go-toml/parser.go
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -293,42 +293,41 @@ func (p *tomlParser) parseRvalue() interface{} {
return math.NaN()
case tokenInteger:
cleanedVal := cleanupNumberToken(tok.val)
- var err error
- var val int64
+ base := 10
+ s := cleanedVal
+ checkInvalidUnderscore := numberContainsInvalidUnderscore
if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
switch cleanedVal[1] {
case 'x':
- err = hexNumberContainsInvalidUnderscore(tok.val)
- if err != nil {
- p.raiseError(tok, "%s", err)
- }
- val, err = strconv.ParseInt(cleanedVal[2:], 16, 64)
+ checkInvalidUnderscore = hexNumberContainsInvalidUnderscore
+ base = 16
case 'o':
- err = numberContainsInvalidUnderscore(tok.val)
- if err != nil {
- p.raiseError(tok, "%s", err)
- }
- val, err = strconv.ParseInt(cleanedVal[2:], 8, 64)
+ base = 8
case 'b':
- err = numberContainsInvalidUnderscore(tok.val)
- if err != nil {
- p.raiseError(tok, "%s", err)
- }
- val, err = strconv.ParseInt(cleanedVal[2:], 2, 64)
+ base = 2
default:
panic("invalid base") // the lexer should catch this first
}
- } else {
- err = numberContainsInvalidUnderscore(tok.val)
- if err != nil {
- p.raiseError(tok, "%s", err)
- }
- val, err = strconv.ParseInt(cleanedVal, 10, 64)
+ s = cleanedVal[2:]
}
+
+ err := checkInvalidUnderscore(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
- return val
+
+ var val interface{}
+ val, err = strconv.ParseInt(s, base, 64)
+ if err == nil {
+ return val
+ }
+
+ if s[0] != '-' {
+ if val, err = strconv.ParseUint(s, base, 64); err == nil {
+ return val
+ }
+ }
+ p.raiseError(tok, "%s", err)
case tokenFloat:
err := numberContainsInvalidUnderscore(tok.val)
if err != nil {
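
The rewritten integer branch above funnels every base through a single strconv.ParseInt call and, when that overflows and the literal is non-negative, retries with strconv.ParseUint, so values above math.MaxInt64 now decode as uint64 instead of erroring. A small sketch against the go-toml v1 API (the document keys are made up):

    package main

    import (
        "fmt"

        toml "github.com/pelletier/go-toml"
    )

    func main() {
        tree, err := toml.Load("small = 42\nbig = 18446744073709551615") // big == math.MaxUint64
        if err != nil {
            panic(err)
        }
        fmt.Printf("%T %v\n", tree.Get("small"), tree.Get("small")) // int64 42
        fmt.Printf("%T %v\n", tree.Get("big"), tree.Get("big"))     // uint64 18446744073709551615
    }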
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
index 6d82587c48..5541b941f8 100644
--- a/vendor/github.com/pelletier/go-toml/toml.go
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -471,7 +471,7 @@ func LoadBytes(b []byte) (tree *Tree, err error) {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
- err = errors.New(r.(string))
+ err = fmt.Errorf("%s", r)
}
}()
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
index 246c5ea943..2f5616894e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
@@ -28,6 +28,8 @@ var (
MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")}
// MetricsGC allows only GC metrics to be collected from Go runtime.
// e.g. go_gc_cycles_automatic_gc_cycles_total
+ // NOTE: This does not include new class of "/cpu/classes/gc/..." metrics.
+ // Use custom metric rule to access those.
MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)}
// MetricsMemory allows only memory metrics to be collected from Go runtime.
// e.g. go_memory_classes_heap_free_bytes
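
The NOTE added above flags that MetricsGC only matches ^/gc/.* and therefore skips the newer /cpu/classes/gc/... runtime metrics. A hedged sketch of adding a custom rule for those, assuming the NewGoCollector and WithGoCollectorRuntimeMetrics options from this collectors package:

    reg := prometheus.NewRegistry()
    reg.MustRegister(collectors.NewGoCollector(
        collectors.WithGoCollectorRuntimeMetrics(
            collectors.MetricsGC, // go_gc_* metrics only
            // Extra rule for the GC CPU classes that MetricsGC does not cover.
            collectors.GoRuntimeMetricsRule{regexp.MustCompile(`^/cpu/classes/gc/.*`)},
        ),
    ))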
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index a912b75a05..62de4dc59a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -59,6 +59,18 @@ type ExemplarAdder interface {
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+ CounterOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
@@ -174,16 +186,24 @@ type CounterVec struct {
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- desc := NewDesc(
+ return V2.NewCounterVec(CounterVecOpts{
+ CounterOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.
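
With CounterVecOpts in place, NewCounterVec(opts, labelNames) is now a thin wrapper over V2.NewCounterVec with UnconstrainedLabels, and the V2 form additionally accepts a per-label Constraint that normalizes values before they are hashed into a series. A sketch; the metric and label names are illustrative:

    requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
        CounterOpts: prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "HTTP requests, partitioned by normalized method.",
        },
        VariableLabels: prometheus.ConstrainedLabels{
            // The Constraint runs on every supplied value, so "get" and "GET"
            // end up in the same series.
            {Name: "method", Constraint: strings.ToUpper},
        },
    })
    requests.WithLabelValues("get").Inc() // recorded as method="GET"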
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 8bc5e44e2f..deedc2dfbe 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -14,20 +14,16 @@
package prometheus
import (
- "errors"
"fmt"
"sort"
"strings"
"github.com/cespare/xxhash/v2"
-
- "github.com/prometheus/client_golang/prometheus/internal"
-
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
- dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/client_golang/prometheus/internal"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -54,9 +50,9 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
- // variableLabels contains names of labels for which the metric
- // maintains variable values.
- variableLabels []string
+ // variableLabels contains names of labels and normalization function for
+ // which the metric maintains variable values.
+ variableLabels ConstrainedLabels
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
@@ -80,10 +76,24 @@ type Desc struct {
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
- variableLabels: variableLabels,
+ variableLabels: variableLabels.constrainedLabels(),
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
@@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
- for _, labelName := range variableLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+ for _, label := range d.variableLabels {
+ if !checkLabelName(label.Name) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
return d
}
- labelNames = append(labelNames, "$"+labelName)
- labelNameSet[labelName] = struct{}{}
+ labelNames = append(labelNames, "$"+label.Name)
+ labelNameSet[label.Name] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
- d.err = errors.New("duplicate label names")
+ d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
return d
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 811072cbd5..962608f02c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -37,35 +37,35 @@
//
// type metrics struct {
// cpuTemp prometheus.Gauge
-// hdFailures *prometheus.CounterVec
+// hdFailures *prometheus.CounterVec
// }
//
// func NewMetrics(reg prometheus.Registerer) *metrics {
-// m := &metrics{
-// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// }),
-// hdFailures: prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// ),
-// }
-// reg.MustRegister(m.cpuTemp)
-// reg.MustRegister(m.hdFailures)
-// return m
+// m := &metrics{
+// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// }),
+// hdFailures: prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// ),
+// }
+// reg.MustRegister(m.cpuTemp)
+// reg.MustRegister(m.hdFailures)
+// return m
// }
//
// func main() {
-// // Create a non-global registry.
-// reg := prometheus.NewRegistry()
+// // Create a non-global registry.
+// reg := prometheus.NewRegistry()
//
-// // Create new metrics and register them using the custom registry.
-// m := NewMetrics(reg)
-// // Set values for the new created metrics.
+// // Create new metrics and register them using the custom registry.
+// m := NewMetrics(reg)
+// // Set values for the new created metrics.
// m.cpuTemp.Set(65.3)
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index 21271a5bb4..f1ea6c76f7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -55,6 +55,18 @@ type Gauge interface {
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+ GaugeOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
@@ -138,16 +150,24 @@ type GaugeVec struct {
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- desc := NewDesc(
+ return V2.NewGaugeVec(GaugeVecOpts{
+ GaugeOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
index 3a2d55e84b..2d8d9f64f4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -23,11 +23,10 @@ import (
"strings"
"sync"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- dto "github.com/prometheus/client_model/go"
-
"github.com/prometheus/client_golang/prometheus/internal"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
)
const (
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 4c873a01c3..8d818afe90 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -22,10 +22,9 @@ import (
"sync/atomic"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "google.golang.org/protobuf/proto"
)
// nativeHistogramBounds for the frac of observed values. Only relevant for
@@ -402,7 +401,7 @@ type HistogramOpts struct {
// Histogram by a Prometheus server with that feature enabled (requires
// Prometheus v2.40+). Sparse buckets are exponential buckets covering
// the whole float64 range (with the exception of the “zero” bucket, see
- // SparseBucketsZeroThreshold below). From any one bucket to the next,
+ // NativeHistogramZeroThreshold below). From any one bucket to the next,
// the width of the bucket grows by a constant
// factor. NativeHistogramBucketFactor provides an upper bound for this
// factor (exception see below). The smaller
@@ -433,7 +432,7 @@ type HistogramOpts struct {
// bucket. For best results, this should be close to a bucket
// boundary. This is usually the case if picking a power of two. If
// NativeHistogramZeroThreshold is left at zero,
- // DefSparseBucketsZeroThreshold is used as the threshold. To configure
+ // DefNativeHistogramZeroThreshold is used as the threshold. To configure
// a zero bucket with an actual threshold of zero (i.e. only
// observations of precisely zero will go into the zero bucket), set
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
@@ -469,6 +468,18 @@ type HistogramOpts struct {
NativeHistogramMaxZeroThreshold float64
}
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+ HistogramOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
//
@@ -489,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
}
for _, n := range desc.variableLabels {
- if n == bucketLabel {
+ if n.Name == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
@@ -544,16 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars:
- h.counts[0] = &histogramCounts{
- buckets: make([]uint64, len(h.upperBounds)),
- nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
- nativeHistogramSchema: h.nativeHistogramSchema,
- }
- h.counts[1] = &histogramCounts{
- buckets: make([]uint64, len(h.upperBounds)),
- nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
- nativeHistogramSchema: h.nativeHistogramSchema,
- }
+ h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+ h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
@@ -632,8 +639,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
if frac == 0.5 {
key--
}
- div := 1 << -schema
- key = (key + div - 1) / div
+ offset := (1 << -schema) - 1
+ key = (key + offset) >> -schema
}
if isInf {
key++
@@ -810,7 +817,7 @@ func (h *histogram) observe(v float64, bucket int) {
}
}
-// limitSparsebuckets applies a strategy to limit the number of populated sparse
+// limitBuckets applies a strategy to limit the number of populated sparse
// buckets. It's generally best effort, and there are situations where the
// number can go higher (if even the lowest resolution isn't enough to reduce
// the number sufficiently, or if the provided counts aren't fully updated yet
@@ -1034,15 +1041,23 @@ type HistogramVec struct {
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- desc := NewDesc(
+ return V2.NewHistogramVec(HistogramVecOpts{
+ HistogramOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newHistogram(desc, opts, lvs...)
+ return newHistogram(desc, opts.HistogramOpts, lvs...)
}),
}
}
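
Most of the histogram hunks above touch the native (sparse) bucket support: NativeHistogramBucketFactor bounds the growth factor between adjacent sparse buckets and NativeHistogramZeroThreshold sizes the special zero bucket. A sketch of opting a histogram into native buckets alongside the classic ones; the numeric values are illustrative only:

    latency := prometheus.NewHistogram(prometheus.HistogramOpts{
        Name:    "request_duration_seconds",
        Help:    "Request latency.",
        Buckets: prometheus.DefBuckets, // classic buckets are still exposed
        // Native (sparse) buckets, usable by Prometheus v2.40+ with the feature enabled:
        NativeHistogramBucketFactor:    1.1,  // adjacent buckets differ by at most ~10%
        NativeHistogramZeroThreshold:   1e-9, // smaller observations land in the zero bucket
        NativeHistogramMaxBucketNumber: 100,  // resolution is reduced once this many buckets exist
    })
    latency.Observe(0.042)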
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index c1b8fad36a..63ff8683ce 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -32,6 +32,78 @@ import (
// create a Desc.
type Labels map[string]string
+// ConstrainedLabels represents a label name and its constrain function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+ Name string
+ Constraint func(string) string
+}
+
+func (cl ConstrainedLabel) Constrain(v string) string {
+ if cl.Constraint == nil {
+ return v
+ }
+ return cl.Constraint(v)
+}
+
+// ConstrainableLabels is an interface that allows creating of labels that can
+// be optionally constrained.
+//
+// prometheus.V2().NewCounterVec(CounterVecOpts{
+// CounterOpts: {...}, // Usual CounterOpts fields
+// VariableLabels: []ConstrainedLabels{
+// {Name: "A"},
+// {Name: "B", Constraint: func(v string) string { ... }},
+// },
+// })
+type ConstrainableLabels interface {
+ constrainedLabels() ConstrainedLabels
+ labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constrain function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
+ return cls
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+ names := make([]string, len(cls))
+ for i, label := range cls {
+ names[i] = label.Name
+ }
+ return names
+}
+
+// UnconstrainedLabels represents collection of label without any constraint on
+// their value. Thus, it is simply a collection of label names.
+//
+// UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+// ConstrainedLabels {
+// { Name: "A" },
+// { Name: "B" },
+// }
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
+ constrainedLabels := make([]ConstrainedLabel, len(uls))
+ for i, l := range uls {
+ constrainedLabels[i] = ConstrainedLabel{Name: l}
+ }
+ return constrainedLabels
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+ return uls
+}
+
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index b5119c5041..07bbc9d768 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -20,11 +20,9 @@ import (
"strings"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
)
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index a4cc9810b0..09b8d2fbea 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -37,6 +37,7 @@ import (
"fmt"
"io"
"net/http"
+ "strconv"
"strings"
"sync"
"time"
@@ -47,9 +48,10 @@ import (
)
const (
- contentTypeHeader = "Content-Type"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
+ contentTypeHeader = "Content-Type"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+ processStartTimeHeader = "Process-Start-Time-Unix"
)
var gzipPool = sync.Pool{
@@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+ if !opts.ProcessStartTime.IsZero() {
+ rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
+ }
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
@@ -366,6 +371,14 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
+ // ProcessStartTime allows setting process start time value that will be exposed
+ // with "Process-Start-Time-Unix" response header along with the metrics
+ // payload. This allows callers to have efficient transformations to cumulative
+ // counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
+ // scrape target.
+ // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
+ // exposition format.
+ ProcessStartTime time.Time
}
// gzipAccepted returns whether the client will accept gzip-encoded content.
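
The new ProcessStartTime option above is surfaced as a Process-Start-Time-Unix response header on every scrape, which lets scrapers estimate created timestamps for cumulative series. A sketch of wiring it into a metrics endpoint:

    processStart := time.Now() // ideally captured once at process startup

    mux := http.NewServeMux()
    mux.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
        // Exposed as the "Process-Start-Time-Unix" header; marked experimental above.
        ProcessStartTime: processStart,
    }))
    _ = http.ListenAndServe(":9090", mux)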
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 2108678162..d3482c40ca 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
o.apply(rtOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
- addWithExemplar(
- counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
- 1,
- rtOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
}
return resp, err
}
@@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
o.apply(rtOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
return func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
- time.Since(start).Seconds(),
- rtOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
}
return resp, err
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index cca67a78a9..3793036ad0 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
@@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
-
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
}
}
@@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
o.apply(hOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- addWithExemplar(
- counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- 1,
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
- addWithExemplar(
- counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- 1,
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
}
}
@@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
- time.Since(now).Seconds(),
- hOpts.getExemplarFn(r.Context()),
- )
+ l := labels(code, method, r.Method, status, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
})
next.ServeHTTP(d, r)
}
@@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
+
if code {
return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- float64(size),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
}
}
return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
- float64(size),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
}
}
@@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- observeWithExemplar(
- obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
- float64(d.Written()),
- hOpts.getExemplarFn(r.Context()),
- )
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
})
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
index c590d912c9..5d4383aa14 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -24,14 +24,32 @@ type Option interface {
apply(*options)
}
+// LabelValueFromCtx are used to compute the label value from request context.
+// Context can be filled with values from request through middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
// options store options for both a handler or round tripper.
type options struct {
- extraMethods []string
- getExemplarFn func(requestCtx context.Context) prometheus.Labels
+ extraMethods []string
+ getExemplarFn func(requestCtx context.Context) prometheus.Labels
+ extraLabelsFromCtx map[string]LabelValueFromCtx
}
func defaultOptions() *options {
- return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
+ return &options{
+ getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
+ extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+ }
+}
+
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+ labels := prometheus.Labels{}
+
+ for label := range o.extraLabelsFromCtx {
+ labels[label] = ""
+ }
+
+ return labels
}
type optionApplyFunc func(*options)
@@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option {
})
}
-// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics.
-// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric
-// will get instrumented without exemplar.
+// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics.
+// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but
+// metric will continue to observe/increment.
func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
return optionApplyFunc(func(o *options) {
o.getExemplarFn = getExemplarFn
})
}
+
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+ return optionApplyFunc(func(o *options) {
+ o.extraLabelsFromCtx[name] = valueFn
+ })
+}
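
WithLabelFromCtx registers a label whose value is resolved from the request context on every observation; the instrumentation wrappers curry the metric with empty values for these labels up front, so the vector must declare the label name. A sketch where the "tenant" label, the tenantKey context key, and the wrapped mux handler are made up for illustration:

    requests := prometheus.NewCounterVec(prometheus.CounterOpts{
        Name: "http_requests_total",
        Help: "Requests by code, method and tenant.",
    }, []string{"code", "method", "tenant"}) // "tenant" is resolved dynamically

    handler := promhttp.InstrumentHandlerCounter(requests, mux,
        promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
            if t, ok := ctx.Value(tenantKey{}).(string); ok {
                return t
            }
            return "unknown"
        }),
    )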
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 09e34d307c..44da9433be 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -21,18 +21,17 @@ import (
"path/filepath"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"unicode/utf8"
- "github.com/cespare/xxhash/v2"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/prometheus/internal"
+ "github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/internal"
+ "github.com/prometheus/common/expfmt"
+ "google.golang.org/protobuf/proto"
)
const (
@@ -933,6 +932,10 @@ func checkMetricConsistency(
h.WriteString(lp.GetValue())
h.Write(separatorByteSlice)
}
+ if dtoMetric.TimestampMs != nil {
+ h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+ h.Write(separatorByteSlice)
+ }
hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
@@ -962,7 +965,7 @@ func checkDescConsistency(
copy(lpsFromDesc, desc.constLabelPairs)
for _, l := range desc.variableLabels {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
- Name: proto.String(l),
+ Name: proto.String(l.Name),
})
}
if len(lpsFromDesc) != len(dtoMetric.Label) {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 7bc448a893..dd359264e5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -22,11 +22,10 @@ import (
"sync/atomic"
"time"
- "github.com/beorn7/perks/quantile"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "github.com/beorn7/perks/quantile"
+ "google.golang.org/protobuf/proto"
)
// quantileLabel is used for the label that defines the quantile in a
@@ -148,6 +147,18 @@ type SummaryOpts struct {
BufCap uint32
}
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type SummaryVecOpts struct {
+ SummaryOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// Problem with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
@@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary {
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
}
for _, n := range desc.variableLabels {
- if n == quantileLabel {
+ if n.Name == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
@@ -530,20 +541,28 @@ type SummaryVec struct {
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- for _, ln := range labelNames {
+ return V2.NewSummaryVec(SummaryVecOpts{
+ SummaryOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+ for _, ln := range opts.VariableLabels.labelNames() {
if ln == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
- desc := NewDesc(
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newSummary(desc, opts, lvs...)
+ return newSummary(desc, opts.SummaryOpts, lvs...)
}),
}
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
index a20f159b78..c8864b6c3f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
@@ -287,17 +287,15 @@ func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem {
func metricUnits(m string) (unit, base string, ok bool) {
ss := strings.Split(m, "_")
- for unit, base := range units {
- // Also check for "no prefix".
- for _, p := range append(unitPrefixes, "") {
- for _, s := range ss {
- // Attempt to explicitly match a known unit with a known prefix,
- // as some words may look like "units" when matching suffix.
- //
- // As an example, "thermometers" should not match "meters", but
- // "kilometers" should.
- if s == p+unit {
- return p + unit, base, true
+ for _, s := range ss {
+ if base, found := units[s]; found {
+ return s, base, true
+ }
+
+ for _, p := range unitPrefixes {
+ if strings.HasPrefix(s, p) {
+ if base, found := units[s[len(p):]]; found {
+ return s, base, true
}
}
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
index 91b83b5285..82d4a5436b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
@@ -238,6 +238,7 @@ func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error)
func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...string) error {
if metricNames != nil {
got = filterMetrics(got, metricNames)
+ expected = filterMetrics(expected, metricNames)
}
return compare(got, expected)
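
Because compareMetricFamilies now filters the expected families by the same metric names as the gathered ones, an expectation that carries extra families no longer produces a spurious diff when names are passed. A sketch using this package's CollectAndCompare helper; requests is any prometheus.Collector under test and t is the surrounding *testing.T:

    expected := "# HELP http_requests_total Total requests.\n" +
        "# TYPE http_requests_total counter\n" +
        "http_requests_total 3\n"
    // Only http_requests_total is compared; extra families on either side are ignored.
    if err := testutil.CollectAndCompare(requests, strings.NewReader(expected), "http_requests_total"); err != nil {
        t.Fatal(err)
    }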
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index f28a76f3a6..52344fef53 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -23,7 +23,9 @@ type Timer struct {
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
+// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar
+// later on will be also supported.
+// Timer is usually used to time a function call in the
// following way:
//
// func TimeMe() {
@@ -31,6 +33,14 @@ type Timer struct {
// defer timer.ObserveDuration()
// // Do actual work.
// }
+//
+// or
+//
+// func TimeMeWithExemplar() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDurationWithExemplar(exemplar)
+// // Do actual work.
+// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
@@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
}
return d
}
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe exemplar with the duration unless exemplar is nil or provided Observer can't
+// be casted to ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+ d := time.Since(t.begin)
+ eo, ok := t.observer.(ExemplarObserver)
+ if ok && exemplar != nil {
+ eo.ObserveWithExemplar(d.Seconds(), exemplar)
+ return d
+ }
+ if t.observer != nil {
+ t.observer.Observe(d.Seconds())
+ }
+ return d
+}
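
ObserveDurationWithExemplar behaves like ObserveDuration but also attaches an exemplar when the underlying Observer supports it (histograms do); with a nil exemplar or a plain Observer it degrades to a normal observation. A sketch, assuming a traceID string is available at the call site:

    func timedWork(hist prometheus.Observer, traceID string) {
        timer := prometheus.NewTimer(hist)
        // Falls back to a plain Observe if hist is not an ExemplarObserver
        // or the exemplar labels are nil.
        defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})

        // ... actual work ...
    }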
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index 2d3abc1cbd..5f6bb80014 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -19,13 +19,11 @@ import (
"time"
"unicode/utf8"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/types/known/timestamppb"
-
"github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// ValueType is an enumeration of metric types that represent a simple value.
@@ -188,9 +186,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, n := range desc.variableLabels {
+ for i, l := range desc.variableLabels {
labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(n),
+ Name: proto.String(l.Name),
Value: proto.String(labelValues[i]),
})
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 7ae322590c..f0d0015a0f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -20,6 +20,24 @@ import (
"github.com/prometheus/common/model"
)
+var labelsPool = &sync.Pool{
+ New: func() interface{} {
+ return make(Labels)
+ },
+}
+
+func getLabelsFromPool() Labels {
+ return labelsPool.Get().(Labels)
+}
+
+func putLabelsToPool(labels Labels) {
+ for k := range labels {
+ delete(labels, k)
+ }
+
+ labelsPool.Put(labels)
+}
+
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
@@ -72,6 +90,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@@ -91,6 +110,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
+ labels = constrainLabels(m.desc, labels)
+ defer putLabelsToPool(labels)
+
h, err := m.hashLabels(labels)
if err != nil {
return false
@@ -106,6 +128,9 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+ labels = constrainLabels(m.desc, labels)
+ defer putLabelsToPool(labels)
+
return m.metricMap.deleteByLabels(labels, m.curry)
}
@@ -145,10 +170,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
iCurry int
)
for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ val, ok := labels[label.Name]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
- return nil, fmt.Errorf("label name %q is already curried", label)
+ return nil, fmt.Errorf("label name %q is already curried", label.Name)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
@@ -156,7 +181,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok {
continue // Label stays uncurried.
}
- newCurry = append(newCurry, curriedLabelValue{i, val})
+ newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)})
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@@ -199,6 +224,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@@ -224,6 +250,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ labels = constrainLabels(m.desc, labels)
+ defer putLabelsToPool(labels)
+
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@@ -266,16 +295,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
iCurry int
)
for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ val, ok := labels[label.Name]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
- return 0, fmt.Errorf("label name %q is already curried", label)
+ return 0, fmt.Errorf("label name %q is already curried", label.Name)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
+ return 0, fmt.Errorf("label name %q missing in label map", label.Name)
}
h = m.hashAdd(h, val)
}
@@ -453,7 +482,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels {
// Check if the target label exists in our metrics and get the index.
- varLabelIndex, validLabel := indexOf(l, desc.variableLabels)
+ varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames())
if validLabel {
// Check the value of that label against the target value.
// We don't consider curried values in partial matches.
@@ -605,7 +634,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
iCurry++
continue
}
- if values[i] != labels[k] {
+ if values[i] != labels[k.Name] {
return false
}
}
@@ -621,7 +650,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []
iCurry++
continue
}
- labelValues[i] = labels[k]
+ labelValues[i] = labels[k.Name]
}
return labelValues
}
@@ -640,3 +669,35 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
}
return labelValues
}
+
+func constrainLabels(desc *Desc, labels Labels) Labels {
+ constrainedLabels := getLabelsFromPool()
+ for l, v := range labels {
+ if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
+ v = desc.variableLabels[i].Constrain(v)
+ }
+
+ constrainedLabels[l] = v
+ }
+
+ return constrainedLabels
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+ constrainedValues := make([]string, len(lvs))
+ var iCurry, iLVs int
+ for i := 0; i < len(lvs)+len(curry); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ iCurry++
+ continue
+ }
+
+ if i < len(desc.variableLabels) {
+ constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs])
+ } else {
+ constrainedValues[iLVs] = lvs[iLVs]
+ }
+ iLVs++
+ }
+ return constrainedValues
+}
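
To make the intent of the new constrain helpers concrete, here is a minimal, self-contained sketch of the behaviour they rely on: each variable label may carry an optional constraint that normalizes every value recorded for it, while unconstrained labels pass values through unchanged. The type and field names below are illustrative only, not the exported client_golang API.

package main

import (
	"fmt"
	"strings"
)

// constrainedLabel mirrors the shape used above: a label name plus an
// optional Constraint that normalizes every value observed for it.
// These names are illustrative, not the library's exported types.
type constrainedLabel struct {
	Name       string
	Constraint func(string) string
}

// Constrain applies the constraint if one is set, otherwise returns the
// value unchanged, which is the behaviour constrainLabels depends on.
func (l constrainedLabel) Constrain(v string) string {
	if l.Constraint == nil {
		return v
	}
	return l.Constraint(v)
}

func main() {
	variableLabels := []constrainedLabel{
		{Name: "method", Constraint: strings.ToUpper},
		{Name: "path"}, // unconstrained: values pass through untouched
	}

	labels := map[string]string{"method": "get", "path": "/healthz"}

	// Equivalent of constrainLabels: rewrite each value through its
	// label's constraint before it is hashed or stored.
	constrained := make(map[string]string, len(labels))
	for _, vl := range variableLabels {
		if v, ok := labels[vl.Name]; ok {
			constrained[vl.Name] = vl.Constrain(v)
		}
	}

	fmt.Println(constrained) // map[method:GET path:/healthz]
}
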
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 0000000000..42bc3a8f06
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access experimental API that might
+// be present in v2 of client golang someday. It offers extended functionality
+// of v1 with a slightly changed API. It is acceptable to mix pieces from v1,
+// e.g. `prometheus.NewGauge`, with pieces from v2, e.g. `prometheus.V2.NewDesc`,
+// in the same codebase.
+var V2 = v2{}
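
The doc comment above describes mixing the stable v1 constructors with the experimental V2 entry point. Below is a minimal sketch of that, assuming an exported `ConstrainedLabels` type and a `V2.NewDesc` signature that mirrors the four-argument call shape used by wrap.go later in this patch (fqName, help, variable labels, const labels); treat those details as assumptions rather than a definitive API reference.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// v1 API, unchanged: plain constructor (registration omitted for brevity).
	g := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "queue_depth",
		Help: "Current depth of the work queue.",
	})
	g.Set(3)

	// V2 entry point: same positional shape as NewDesc, but the variable
	// labels are assumed to be the constrained-label type introduced above.
	desc := prometheus.V2.NewDesc(
		"http_requests_total",
		"Total HTTP requests.",
		prometheus.ConstrainedLabels{{Name: "method"}},
		nil,
	)
	_ = desc
}
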
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 1498ee144c..25da157f15 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,12 +17,10 @@ import (
"fmt"
"sort"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
+ "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/internal"
+ "google.golang.org/protobuf/proto"
)
// WrapRegistererWith returns a Registerer wrapping the provided
@@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels[ln] = lv
}
// NewDesc will do remaining validations.
- newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
 	// Propagate errors if there were any. This will override any error
// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil {
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index 35904ea198..2b5bca4b99 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,25 +1,38 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.20.3
// source: io/prometheus/client/metrics.proto
package io_prometheus_client
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type MetricType int32
@@ -38,23 +51,25 @@ const (
MetricType_GAUGE_HISTOGRAM MetricType = 5
)
-var MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
- 5: "GAUGE_HISTOGRAM",
-}
-
-var MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
- "GAUGE_HISTOGRAM": 5,
-}
+// Enum value maps for MetricType.
+var (
+ MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+ 5: "GAUGE_HISTOGRAM",
+ }
+ MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+ "GAUGE_HISTOGRAM": 5,
+ }
+)
func (x MetricType) Enum() *MetricType {
p := new(MetricType)
@@ -63,449 +78,519 @@ func (x MetricType) Enum() *MetricType {
}
func (x MetricType) String() string {
- return proto.EnumName(MetricType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
-func (x *MetricType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+ return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+ return &file_io_prometheus_client_metrics_proto_enumTypes[0]
+}
+
+func (x MetricType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = MetricType(value)
+ *x = MetricType(num)
return nil
}
+// Deprecated: Use MetricType.Descriptor instead.
func (MetricType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{0}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
}
type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage() {}
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{0}
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelPair.Unmarshal(m, b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
-}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(m, src)
+func (x *LabelPair) Reset() {
+ *x = LabelPair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *LabelPair) XXX_Size() int {
- return xxx_messageInfo_LabelPair.Size(m)
+
+func (x *LabelPair) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LabelPair) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelPair.DiscardUnknown(m)
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
-func (m *LabelPair) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *LabelPair) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *LabelPair) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *LabelPair) GetValue() string {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return ""
}
type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{1}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Gauge.Unmarshal(m, b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
+func (x *Gauge) Reset() {
+ *x = Gauge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Gauge) XXX_Size() int {
- return xxx_messageInfo_Gauge.Size(m)
+
+func (x *Gauge) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
+}
-func (m *Gauge) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Gauge) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Counter) Reset() { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{2}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Counter.Unmarshal(m, b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
+func (x *Counter) Reset() {
+ *x = Counter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Counter) XXX_Size() int {
- return xxx_messageInfo_Counter.Size(m)
+
+func (x *Counter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Counter proto.InternalMessageInfo
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
+func (*Counter) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
+}
-func (m *Counter) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Counter) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Counter) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Counter) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Quantile) Reset() { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage() {}
-func (*Quantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{3}
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
}
-func (m *Quantile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantile.Unmarshal(m, b)
-}
-func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
-}
-func (m *Quantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantile.Merge(m, src)
+func (x *Quantile) Reset() {
+ *x = Quantile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Quantile) XXX_Size() int {
- return xxx_messageInfo_Quantile.Size(m)
+
+func (x *Quantile) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Quantile) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantile.DiscardUnknown(m)
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Quantile proto.InternalMessageInfo
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
+}
-func (m *Quantile) GetQuantile() float64 {
- if m != nil && m.Quantile != nil {
- return *m.Quantile
+func (x *Quantile) GetQuantile() float64 {
+ if x != nil && x.Quantile != nil {
+ return *x.Quantile
}
return 0
}
-func (m *Quantile) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Quantile) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{4}
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
}
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Summary.Unmarshal(m, b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
+func (x *Summary) Reset() {
+ *x = Summary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Summary) XXX_Size() int {
- return xxx_messageInfo_Summary.Size(m)
+
+func (x *Summary) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Summary proto.InternalMessageInfo
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
+}
-func (m *Summary) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Summary) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Summary) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Summary) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Summary) GetQuantile() []*Quantile {
- if m != nil {
- return m.Quantile
+func (x *Summary) GetQuantile() []*Quantile {
+ if x != nil {
+ return x.Quantile
}
return nil
}
type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Untyped) Reset() { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage() {}
-func (*Untyped) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{5}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Untyped) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Untyped.Unmarshal(m, b)
-}
-func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
-}
-func (m *Untyped) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Untyped.Merge(m, src)
+func (x *Untyped) Reset() {
+ *x = Untyped{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Untyped) XXX_Size() int {
- return xxx_messageInfo_Untyped.Size(m)
+
+func (x *Untyped) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Untyped) XXX_DiscardUnknown() {
- xxx_messageInfo_Untyped.DiscardUnknown(m)
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Untyped proto.InternalMessageInfo
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
+}
-func (m *Untyped) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Untyped) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Histogram struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"`
+ SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
// Buckets for the conventional histogram.
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
// In the future, more bucket schemas may be added using numbers < -4 or > 8.
Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
- ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`
- ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`
- ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`
+ ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
+ ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
+ ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0.
// Negative buckets for the native histogram.
NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
// Use either "negative_delta" or "negative_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
- NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"`
- NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`
+ NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
// Positive buckets for the native histogram.
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
- PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"`
- PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
}
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{6}
+func (x *Histogram) Reset() {
+ *x = Histogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Histogram.Unmarshal(m, b)
+func (x *Histogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
- return xxx_messageInfo_Histogram.Size(m)
-}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
+}
-func (m *Histogram) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Histogram) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Histogram) GetSampleCountFloat() float64 {
- if m != nil && m.SampleCountFloat != nil {
- return *m.SampleCountFloat
+func (x *Histogram) GetSampleCountFloat() float64 {
+ if x != nil && x.SampleCountFloat != nil {
+ return *x.SampleCountFloat
}
return 0
}
-func (m *Histogram) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Histogram) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Histogram) GetBucket() []*Bucket {
- if m != nil {
- return m.Bucket
+func (x *Histogram) GetBucket() []*Bucket {
+ if x != nil {
+ return x.Bucket
}
return nil
}
-func (m *Histogram) GetSchema() int32 {
- if m != nil && m.Schema != nil {
- return *m.Schema
+func (x *Histogram) GetSchema() int32 {
+ if x != nil && x.Schema != nil {
+ return *x.Schema
}
return 0
}
-func (m *Histogram) GetZeroThreshold() float64 {
- if m != nil && m.ZeroThreshold != nil {
- return *m.ZeroThreshold
+func (x *Histogram) GetZeroThreshold() float64 {
+ if x != nil && x.ZeroThreshold != nil {
+ return *x.ZeroThreshold
}
return 0
}
-func (m *Histogram) GetZeroCount() uint64 {
- if m != nil && m.ZeroCount != nil {
- return *m.ZeroCount
+func (x *Histogram) GetZeroCount() uint64 {
+ if x != nil && x.ZeroCount != nil {
+ return *x.ZeroCount
}
return 0
}
-func (m *Histogram) GetZeroCountFloat() float64 {
- if m != nil && m.ZeroCountFloat != nil {
- return *m.ZeroCountFloat
+func (x *Histogram) GetZeroCountFloat() float64 {
+ if x != nil && x.ZeroCountFloat != nil {
+ return *x.ZeroCountFloat
}
return 0
}
-func (m *Histogram) GetNegativeSpan() []*BucketSpan {
- if m != nil {
- return m.NegativeSpan
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+ if x != nil {
+ return x.NegativeSpan
}
return nil
}
-func (m *Histogram) GetNegativeDelta() []int64 {
- if m != nil {
- return m.NegativeDelta
+func (x *Histogram) GetNegativeDelta() []int64 {
+ if x != nil {
+ return x.NegativeDelta
}
return nil
}
-func (m *Histogram) GetNegativeCount() []float64 {
- if m != nil {
- return m.NegativeCount
+func (x *Histogram) GetNegativeCount() []float64 {
+ if x != nil {
+ return x.NegativeCount
}
return nil
}
-func (m *Histogram) GetPositiveSpan() []*BucketSpan {
- if m != nil {
- return m.PositiveSpan
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+ if x != nil {
+ return x.PositiveSpan
}
return nil
}
-func (m *Histogram) GetPositiveDelta() []int64 {
- if m != nil {
- return m.PositiveDelta
+func (x *Histogram) GetPositiveDelta() []int64 {
+ if x != nil {
+ return x.PositiveDelta
}
return nil
}
-func (m *Histogram) GetPositiveCount() []float64 {
- if m != nil {
- return m.PositiveCount
+func (x *Histogram) GetPositiveCount() []float64 {
+ if x != nil {
+ return x.PositiveCount
}
return nil
}
@@ -513,64 +598,72 @@ func (m *Histogram) GetPositiveCount() []float64 {
// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
- CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
+ CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Bucket) Reset() { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage() {}
-func (*Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{7}
+func (x *Bucket) Reset() {
+ *x = Bucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Bucket.Unmarshal(m, b)
-}
-func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
-}
-func (m *Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Bucket.Merge(m, src)
-}
-func (m *Bucket) XXX_Size() int {
- return xxx_messageInfo_Bucket.Size(m)
+func (x *Bucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_Bucket.DiscardUnknown(m)
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Bucket proto.InternalMessageInfo
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
+}
-func (m *Bucket) GetCumulativeCount() uint64 {
- if m != nil && m.CumulativeCount != nil {
- return *m.CumulativeCount
+func (x *Bucket) GetCumulativeCount() uint64 {
+ if x != nil && x.CumulativeCount != nil {
+ return *x.CumulativeCount
}
return 0
}
-func (m *Bucket) GetCumulativeCountFloat() float64 {
- if m != nil && m.CumulativeCountFloat != nil {
- return *m.CumulativeCountFloat
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+ if x != nil && x.CumulativeCountFloat != nil {
+ return *x.CumulativeCountFloat
}
return 0
}
-func (m *Bucket) GetUpperBound() float64 {
- if m != nil && m.UpperBound != nil {
- return *m.UpperBound
+func (x *Bucket) GetUpperBound() float64 {
+ if x != nil && x.UpperBound != nil {
+ return *x.UpperBound
}
return 0
}
-func (m *Bucket) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Bucket) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
@@ -582,333 +675,658 @@ func (m *Bucket) GetExemplar() *Exemplar {
// structured here (with all the buckets in a single array separate
// from the Spans).
type BucketSpan struct {
- Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"`
- Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *BucketSpan) Reset() { *m = BucketSpan{} }
-func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
-func (*BucketSpan) ProtoMessage() {}
-func (*BucketSpan) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{8}
+ Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+ Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
}
-func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BucketSpan.Unmarshal(m, b)
-}
-func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
-}
-func (m *BucketSpan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BucketSpan.Merge(m, src)
+func (x *BucketSpan) Reset() {
+ *x = BucketSpan{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *BucketSpan) XXX_Size() int {
- return xxx_messageInfo_BucketSpan.Size(m)
+
+func (x *BucketSpan) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *BucketSpan) XXX_DiscardUnknown() {
- xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
-func (m *BucketSpan) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
+func (x *BucketSpan) GetOffset() int32 {
+ if x != nil && x.Offset != nil {
+ return *x.Offset
}
return 0
}
-func (m *BucketSpan) GetLength() uint32 {
- if m != nil && m.Length != nil {
- return *m.Length
+func (x *BucketSpan) GetLength() uint32 {
+ if x != nil && x.Length != nil {
+ return *x.Length
}
return 0
}
type Exemplar struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{9}
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Exemplar.Unmarshal(m, b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
+func (x *Exemplar) Reset() {
+ *x = Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Exemplar) XXX_Size() int {
- return xxx_messageInfo_Exemplar.Size(m)
+
+func (x *Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
+}
-func (m *Exemplar) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Exemplar) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Exemplar) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Exemplar) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
}
return nil
}
type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{10}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Metric proto.InternalMessageInfo
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
+}
-func (m *Metric) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Metric) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Metric) GetGauge() *Gauge {
- if m != nil {
- return m.Gauge
+func (x *Metric) GetGauge() *Gauge {
+ if x != nil {
+ return x.Gauge
}
return nil
}
-func (m *Metric) GetCounter() *Counter {
- if m != nil {
- return m.Counter
+func (x *Metric) GetCounter() *Counter {
+ if x != nil {
+ return x.Counter
}
return nil
}
-func (m *Metric) GetSummary() *Summary {
- if m != nil {
- return m.Summary
+func (x *Metric) GetSummary() *Summary {
+ if x != nil {
+ return x.Summary
}
return nil
}
-func (m *Metric) GetUntyped() *Untyped {
- if m != nil {
- return m.Untyped
+func (x *Metric) GetUntyped() *Untyped {
+ if x != nil {
+ return x.Untyped
}
return nil
}
-func (m *Metric) GetHistogram() *Histogram {
- if m != nil {
- return m.Histogram
+func (x *Metric) GetHistogram() *Histogram {
+ if x != nil {
+ return x.Histogram
}
return nil
}
-func (m *Metric) GetTimestampMs() int64 {
- if m != nil && m.TimestampMs != nil {
- return *m.TimestampMs
+func (x *Metric) GetTimestampMs() int64 {
+ if x != nil && x.TimestampMs != nil {
+ return *x.TimestampMs
}
return 0
}
type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetricFamily) Reset() { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage() {}
-func (*MetricFamily) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{11}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+}
+
+func (x *MetricFamily) Reset() {
+ *x = MetricFamily{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
-}
-func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
-}
-func (m *MetricFamily) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricFamily.Merge(m, src)
-}
-func (m *MetricFamily) XXX_Size() int {
- return xxx_messageInfo_MetricFamily.Size(m)
+func (x *MetricFamily) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *MetricFamily) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
+}
-func (m *MetricFamily) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *MetricFamily) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *MetricFamily) GetHelp() string {
- if m != nil && m.Help != nil {
- return *m.Help
+func (x *MetricFamily) GetHelp() string {
+ if x != nil && x.Help != nil {
+ return *x.Help
}
return ""
}
-func (m *MetricFamily) GetType() MetricType {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *MetricFamily) GetType() MetricType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
return MetricType_COUNTER
}
-func (m *MetricFamily) GetMetric() []*Metric {
- if m != nil {
- return m.Metric
+func (x *MetricFamily) GetMetric() []*Metric {
+ if x != nil {
+ return x.Metric
}
return nil
}
-func init() {
- proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
- proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
- proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
- proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
- proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
- proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
- proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
- proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
- proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
- proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
- proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
- proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
- proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
-}
-
-func init() {
- proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
-}
-
-var fileDescriptor_d1e5ddb18987a258 = []byte{
- // 896 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
- 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48,
- 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92,
- 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0,
- 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b,
- 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3,
- 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90,
- 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25,
- 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb,
- 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19,
- 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba,
- 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b,
- 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c,
- 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0,
- 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5,
- 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd,
- 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d,
- 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b,
- 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b,
- 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f,
- 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b,
- 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8,
- 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e,
- 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79,
- 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29,
- 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48,
- 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca,
- 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9,
- 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5,
- 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe,
- 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55,
- 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e,
- 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83,
- 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65,
- 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a,
- 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8,
- 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf,
- 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1,
- 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97,
- 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc,
- 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7,
- 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b,
- 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58,
- 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b,
- 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8,
- 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f,
- 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67,
- 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28,
- 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27,
- 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41,
- 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f,
- 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f,
- 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac,
- 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a,
- 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab,
- 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00,
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
+
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+ 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d,
+ 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c,
+ 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a,
+ 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+ 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65,
+ 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74,
+ 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65,
+ 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72,
+ 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65,
+ 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
+ 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72,
+ 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03,
+ 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74,
+ 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74,
+ 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61,
+ 0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12,
+ 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74,
+ 0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d,
+ 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01,
+ 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75,
+ 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76,
+ 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70,
+ 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a,
+ 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06,
+ 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61,
+ 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69,
+ 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38,
+ 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50,
+ 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61,
+ 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
+ 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a,
+ 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
+ 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
+ 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53,
+ 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12,
+ 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52,
+ 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74,
+ 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69,
+ 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68,
+ 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
+ 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a,
+ 0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
+ 0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41,
+ 0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59,
+ 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12,
+ 0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13,
+ 0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41,
+ 0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
+ 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f,
+ 0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+ file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+ file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+ file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+ file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+ })
+ return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+ (MetricType)(0), // 0: io.prometheus.client.MetricType
+ (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
+ (*Gauge)(nil), // 2: io.prometheus.client.Gauge
+ (*Counter)(nil), // 3: io.prometheus.client.Counter
+ (*Quantile)(nil), // 4: io.prometheus.client.Quantile
+ (*Summary)(nil), // 5: io.prometheus.client.Summary
+ (*Untyped)(nil), // 6: io.prometheus.client.Untyped
+ (*Histogram)(nil), // 7: io.prometheus.client.Histogram
+ (*Bucket)(nil), // 8: io.prometheus.client.Bucket
+ (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
+ (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
+ (*Metric)(nil), // 11: io.prometheus.client.Metric
+ (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
+ (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+ 4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+ 8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+ 9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+ 9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+ 10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 16, // [16:16] is the sub-list for method output_type
+ 16, // [16:16] is the sub-list for method input_type
+ 16, // [16:16] is the sub-list for extension type_name
+ 16, // [16:16] is the sub-list for extension extendee
+ 0, // [0:16] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+ if File_io_prometheus_client_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelPair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Gauge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Counter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Quantile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Summary); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Untyped); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Histogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BucketSpan); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricFamily); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
+ DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+ EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
+ MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
+ }.Build()
+ File_io_prometheus_client_metrics_proto = out.File
+ file_io_prometheus_client_metrics_proto_rawDesc = nil
+ file_io_prometheus_client_metrics_proto_goTypes = nil
+ file_io_prometheus_client_metrics_proto_depIdxs = nil
}
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 7657f841d6..9063978151 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -115,32 +115,31 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
- p TextParser
- fams []*dto.MetricFamily
+ fams map[string]*dto.MetricFamily
+ err error
}
// Decode implements the Decoder interface.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- // TODO(fabxc): Wrap this as a line reader to make streaming safer.
- if len(d.fams) == 0 {
- // No cached metric families, read everything and parse metrics.
- fams, err := d.p.TextToMetricFamilies(d.r)
- if err != nil {
- return err
- }
- if len(fams) == 0 {
- return io.EOF
- }
- d.fams = make([]*dto.MetricFamily, 0, len(fams))
- for _, f := range fams {
- d.fams = append(d.fams, f)
+ if d.err == nil {
+ // Read all metrics in one shot.
+ var p TextParser
+ d.fams, d.err = p.TextToMetricFamilies(d.r)
+ // If we don't get an error, store io.EOF for the end.
+ if d.err == nil {
+ d.err = io.EOF
}
}
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
+ // Pick off one MetricFamily per Decode until there's nothing left.
+ for key, fam := range d.fams {
+ v.Name = fam.Name
+ v.Help = fam.Help
+ v.Type = fam.Type
+ v.Metric = fam.Metric
+ delete(d.fams, key)
+ return nil
+ }
+ return d.err
}
// SampleDecoder wraps a Decoder to extract samples from the metric families
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index 64dc0eb40c..7f611ffaad 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,9 +18,9 @@ import (
"io"
"net/http"
- "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+ "google.golang.org/protobuf/encoding/prototext"
dto "github.com/prometheus/client_model/go"
)
@@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
}
- if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
- return FmtOpenMetrics
+ if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+ if ver == OpenMetricsVersion_1_0_0 {
+ return FmtOpenMetrics_1_0_0
+ }
+ return FmtOpenMetrics_0_0_1
}
}
return FmtText
@@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
case FmtProtoText:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ _, err := fmt.Fprintln(w, prototext.Format(v))
return err
},
close: func() error { return nil },
@@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
},
close: func() error { return nil },
}
- case FmtOpenMetrics:
+ case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v)
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 0f176fa64f..c4cb20f0d3 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -19,20 +19,22 @@ type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
- OpenMetricsVersion = "0.0.1"
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ OpenMetricsType = `application/openmetrics-text`
+ OpenMetricsVersion_0_0_1 = "0.0.1"
+ OpenMetricsVersion_1_0_0 = "1.0.0"
// The Content-Type values for the different wire protocols.
- FmtUnknown Format = ``
- FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
- FmtProtoText Format = ProtoFmt + ` encoding=text`
- FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
- FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
+ FmtUnknown Format = ``
+ FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+ FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+ FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
const (
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index f819e4f8b5..dfac962a4e 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -21,8 +21,8 @@ import "bytes"
// Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 9d94ae9eff..21cdddcf05 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -46,20 +46,20 @@ import (
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-// line. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
+// - Counters are expected to have the `_total` suffix in their metric name. In
+// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
+// line. A counter with a missing `_total` suffix is not an error. However,
+// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+// output.
//
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-// line, info type, stateset type, gaugehistogram type.
+// - No support for the following (optional) features: `# UNIT` line, `_created`
+// line, info type, stateset type, gaugehistogram type.
//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+// exemplars that are larger than allowed by the OpenMetrics specification).
//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+// with a `NaN` value.)
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
name := in.GetName()
if name == "" {
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 5ba503b065..2946b8f1a6 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -17,7 +17,6 @@ import (
"bufio"
"fmt"
"io"
- "io/ioutil"
"math"
"strconv"
"strings"
@@ -44,7 +43,7 @@ const (
var (
bufPool = sync.Pool{
New: func() interface{} {
- return bufio.NewWriter(ioutil.Discard)
+ return bufio.NewWriter(io.Discard)
},
}
numBufPool = sync.Pool{
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 84be0643ec..35db1cc9d7 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -24,8 +24,8 @@ import (
dto "github.com/prometheus/client_model/go"
- "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
)
// A stateFn is a function that represents a state in a state machine. By
@@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) {
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
if p.skipBlankTab(); p.err != nil {
- // End of input reached. This is the only case where
- // that is not an error but a signal that we are done.
- p.err = nil
+ // This is the only place that we expect to see io.EOF,
+ // which is not an error but the signal that we are done.
+ // Any other error that happens to align with the start of
+ // a line is still an error.
+ if p.err == io.EOF {
+ p.err = nil
+ }
return nil
}
switch p.currentByte {
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
index 26e92288c7..a21b9d15dd 100644
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -11,18 +11,18 @@ Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
*/
package goautoneg
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index c909b8aa8c..5727452c1e 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"math"
- "regexp"
"strconv"
"strings"
"time"
@@ -183,54 +182,78 @@ func (d *Duration) Type() string {
return "duration"
}
-var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+ pos int
+ mult uint64
+}{
+ "ms": {7, uint64(time.Millisecond)},
+ "s": {6, uint64(time.Second)},
+ "m": {5, uint64(time.Minute)},
+ "h": {4, uint64(time.Hour)},
+ "d": {3, uint64(24 * time.Hour)},
+ "w": {2, uint64(7 * 24 * time.Hour)},
+ "y": {1, uint64(365 * 24 * time.Hour)},
+}
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- switch durationStr {
+func ParseDuration(s string) (Duration, error) {
+ switch s {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
return 0, errors.New("empty duration string")
}
- matches := durationRE.FindStringSubmatch(durationStr)
- if matches == nil {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
- }
- var dur time.Duration
- // Parse the match at pos `pos` in the regex and use `mult` to turn that
- // into ms, then add that value to the total parsed duration.
- var overflowErr error
- m := func(pos int, mult time.Duration) {
- if matches[pos] == "" {
- return
+ orig := s
+ var dur uint64
+ lastUnitPos := 0
+
+ for s != "" {
+ if !isdigit(s[0]) {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ // Consume [0-9]*
+ i := 0
+ for ; i < len(s) && isdigit(s[i]); i++ {
+ }
+ v, err := strconv.ParseUint(s[:i], 10, 0)
+ if err != nil {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
- n, _ := strconv.Atoi(matches[pos])
+ s = s[i:]
+ // Consume unit.
+ for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+ }
+ if i == 0 {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ u := s[:i]
+ s = s[i:]
+ unit, ok := unitMap[u]
+ if !ok {
+ return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+ }
+ if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ lastUnitPos = unit.pos
// Check if the provided duration overflows time.Duration (> ~ 290years).
- if n > int((1<<63-1)/mult/time.Millisecond) {
- overflowErr = errors.New("duration out of range")
+ if v > 1<<63/unit.mult {
+ return 0, errors.New("duration out of range")
}
- d := time.Duration(n) * time.Millisecond
- dur += d * mult
-
- if dur < 0 {
- overflowErr = errors.New("duration out of range")
+ dur += v * unit.mult
+ if dur > 1<<63-1 {
+ return 0, errors.New("duration out of range")
}
}
-
- m(2, 1000*60*60*24*365) // y
- m(4, 1000*60*60*24*7) // w
- m(6, 1000*60*60*24) // d
- m(8, 1000*60*60) // h
- m(10, 1000*60) // m
- m(12, 1000) // s
- m(14, 1) // ms
-
- return Duration(dur), overflowErr
+ return Duration(dur), nil
}
func (d Duration) String() string {
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index c9d8fb1a28..9eb440413f 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -16,20 +16,12 @@ package model
import (
"encoding/json"
"fmt"
- "math"
"sort"
"strconv"
"strings"
)
var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
- // of 0, which is possible to appear in a real SamplePair and thus not
- // suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
@@ -38,82 +30,14 @@ var (
ZeroSample = Sample{Timestamp: Earliest}
)
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
-
-// Sample is a sample pair associated with a metric.
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. Histogram == nil implies the Value
+// field is used, otherwise it should be ignored.
type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+ Histogram *SampleHistogram `json:"histogram"`
}
// Equal compares first the metrics, then the timestamp, then the value. The
@@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
-
+ if s.Histogram != nil {
+ return s.Histogram.Equal(o.Histogram)
+ }
return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
+ if s.Histogram != nil {
+ return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ })
+ }
return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
@@ -142,6 +74,19 @@ func (s Sample) String() string {
// MarshalJSON implements json.Marshaler.
func (s Sample) MarshalJSON() ([]byte, error) {
+ if s.Histogram != nil {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histogram SampleHistogramPair `json:"histogram"`
+ }{
+ Metric: s.Metric,
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
+ }
+ return json.Marshal(&v)
+ }
v := struct {
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
@@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) {
Value: s.Value,
},
}
-
return json.Marshal(&v)
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *Sample) UnmarshalJSON(b []byte) error {
v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ Histogram SampleHistogramPair `json:"histogram"`
}{
Metric: s.Metric,
Value: SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
},
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
}
if err := json.Unmarshal(b, &v); err != nil {
@@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
}
s.Metric = v.Metric
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
+ if v.Histogram.Histogram != nil {
+ s.Timestamp = v.Histogram.Timestamp
+ s.Histogram = v.Histogram.Histogram
+ } else {
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+ }
return nil
}
@@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool {
// SampleStream is a stream of Values belonging to an attached COWMetric.
type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
}
func (ss SampleStream) String() string {
- vals := make([]string, len(ss.Values))
+ valuesLength := len(ss.Values)
+ vals := make([]string, valuesLength+len(ss.Histograms))
for i, v := range ss.Values {
vals[i] = v.String()
}
+ for i, v := range ss.Histograms {
+ vals[i+valuesLength] = v.String()
+ }
return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
}
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+ if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else if len(ss.Histograms) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ }
+ return json.Marshal(&v)
+ }
}
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
+ ss.Metric = v.Metric
+ ss.Values = v.Values
+ ss.Histograms = v.Histograms
+
+ return nil
}
// Scalar is a scalar value evaluated at the set timestamp.
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 0000000000..0f615a7053
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,100 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+)
+
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+ // of 0, which is possible to appear in a real SamplePair and thus not
+ // suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 0000000000..54bb038cff
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,178 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("float value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = FloatString(f)
+ return nil
+}
+
+type HistogramBucket struct {
+ Boundaries int32
+ Lower FloatString
+ Upper FloatString
+ Count FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(s.Boundaries)
+ if err != nil {
+ return nil, err
+ }
+ l, err := json.Marshal(s.Lower)
+ if err != nil {
+ return nil, err
+ }
+ u, err := json.Marshal(s.Upper)
+ if err != nil {
+ return nil, err
+ }
+ c, err := json.Marshal(s.Count)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+ return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (b HistogramBucket) String() string {
+ var sb strings.Builder
+ lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
+ upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
+ if lowerInclusive {
+ sb.WriteRune('[')
+ } else {
+ sb.WriteRune('(')
+ }
+ fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+ if upperInclusive {
+ sb.WriteRune(']')
+ } else {
+ sb.WriteRune(')')
+ }
+ fmt.Fprintf(&sb, ":%v", b.Count)
+ return sb.String()
+}
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, bucket := range s {
+ if !bucket.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+type SampleHistogram struct {
+ Count FloatString `json:"count"`
+ Sum FloatString `json:"sum"`
+ Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+ return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+ return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+ Timestamp Time
+ // Histogram should never be nil, it's only stored as pointer for efficiency.
+ Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+ if s.Histogram == nil {
+ return nil, fmt.Errorf("histogram is nil")
+ }
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Histogram)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Timestamp, &s.Histogram}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ if s.Histogram == nil {
+ return fmt.Errorf("histogram is null")
+ }
+ return nil
+}
+
+func (s SampleHistogramPair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+ return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 0000000000..726c50ee63
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 6c8e3e2197..b111d25620 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -55,19 +55,22 @@ ifneq ($(shell which gotestsum),)
endif
endif
-PROMU_VERSION ?= 0.13.0
+PROMU_VERSION ?= 0.14.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.45.2
+GOLANGCI_LINT_VERSION ?= v1.51.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
- ifeq (,$(CIRCLE_JOB))
+ ifneq (,$(SKIP_GOLANGCI_LINT))
+ GOLANGCI_LINT :=
+ else ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
@@ -88,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@@ -202,7 +207,7 @@ common-tarball: promu
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
@@ -211,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index ff6b927da1..06968ca2ed 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -380,6 +380,42 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
return cpuinfo, nil
}
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ // find the first "system type" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+ return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ systemType := field[1]
+ i := 0
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ cpuinfo[i].VendorID = systemType
+ case "CPU Family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "Model Name":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
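
parseCPUInfoLoong is unexported and gets wired in through the per-architecture parseCPUInfo variable (see cpuinfo_loong64.go just below), so callers keep using the exported FS.CPUInfo method. A hedged sketch, assuming the usual public import path github.com/prometheus/procfs:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	// On loong64 this dispatches to parseCPUInfoLoong; other GOARCH values use their own parser.
	infos, err := fs.CPUInfo()
	if err != nil {
		panic(err)
	}
	for _, ci := range infos {
		fmt.Printf("processor %d: vendor=%q model=%q\n", ci.Processor, ci.VendorID, ci.ModelName)
	}
}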
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 0000000000..d88442f0ed
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index ea41bf2ca1..a6b2b3127c 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
-// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
index d31a82600f..f9d961e441 100644
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -16,30 +16,29 @@
//
// Example:
//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.Stat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.Stat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 0102ab0fd8..60c551e026 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -21,6 +21,7 @@ import (
// kernel data structures.
type FS struct {
proc fs.FS
+ real bool
}
// DefaultMountPoint is the common mount point of the proc filesystem.
@@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) {
if err != nil {
return FS{}, err
}
- return FS{fs}, nil
+
+ real, err := isRealProc(mountPoint)
+ if err != nil {
+ return FS{}, err
+ }
+
+ return FS{fs, real}, nil
}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
new file mode 100644
index 0000000000..8005769689
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build netbsd || openbsd || solaris || windows
+// +build netbsd openbsd solaris windows
+
+package procfs
+
+// isRealProc returns true on operating systems that don't expose a Type field
+// in their Statfs_t struct.
+func isRealProc(mountPoint string) (bool, error) {
+ return true, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
new file mode 100644
index 0000000000..6233217ad2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !netbsd && !openbsd && !solaris && !windows
+// +build !netbsd,!openbsd,!solaris,!windows
+
+package procfs
+
+import (
+ "syscall"
+)
+
+// isRealProc determines whether the supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+ stat := syscall.Statfs_t{}
+ err := syscall.Statfs(mountPoint, &stat)
+ if err != nil {
+ return false, err
+ }
+
+ // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+ return stat.Type == 0x9fa0, nil
+}
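
The magic-number check above boils down to a single Statfs call. A self-contained, Linux-only sketch of the same idea (isRealProc itself is unexported):

package main

import (
	"fmt"
	"syscall"
)

// procSuperMagic is PROC_SUPER_MAGIC from include/uapi/linux/magic.h.
const procSuperMagic = 0x9fa0

func main() {
	var stat syscall.Statfs_t
	if err := syscall.Statfs("/proc", &stat); err != nil {
		panic(err)
	}
	fmt.Println("real procfs:", stat.Type == procSuperMagic)
}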
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index b030951faf..14272dc788 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
return us, nil
}
+// ParseHexUint64s parses a slice of hex-encoded strings into uint64 pointers.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+ us := make([]*uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, &u)
+ }
+
+ return us, nil
+}
+
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := os.ReadFile(path)
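
ParseHexUint64s lives in the internal util package, so it cannot be imported directly; the sketch below reproduces its behaviour with the standard library to show what callers such as the conntrack parser get back:

package main

import (
	"fmt"
	"strconv"
)

// parseHexUint64s mirrors the vendored helper: every field is parsed as
// base-16 and returned as a pointer, so a caller can tell "missing" from zero.
func parseHexUint64s(ss []string) ([]*uint64, error) {
	us := make([]*uint64, 0, len(ss))
	for _, s := range ss {
		u, err := strconv.ParseUint(s, 16, 64)
		if err != nil {
			return nil, err
		}
		us = append(us, &u)
	}
	return us, nil
}

func main() {
	vals, err := parseHexUint64s([]string{"0000003e", "00000000", "0000000f"})
	if err != nil {
		panic(err)
	}
	fmt.Println(*vals[0], *vals[1], *vals[2]) // 62 0 15
}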
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index f7a828bb1d..7f68890cff 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -186,6 +186,8 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64
+ // The average time from the point the client sends RPC requests until it receives the response.
+ AverageRTTMilliseconds float64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64
}
@@ -284,7 +286,8 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-// device [device] mounted on [mount] with fstype [type]
+//
+// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss)
@@ -533,7 +536,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n)
}
-
opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
@@ -545,6 +547,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
}
+ if ns[0] != 0 {
+ opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
+ }
if len(ns) > 8 {
opStats.Errors = ns[8]
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index 8300daca05..64a0e94606 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -18,7 +18,6 @@ import (
"bytes"
"fmt"
"io"
- "strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
@@ -28,9 +27,13 @@ import (
// and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct {
Entries uint64
+ Searched uint64
Found uint64
+ New uint64
Invalid uint64
Ignore uint64
+ Delete uint64
+ DeleteList uint64
Insert uint64
InsertFailed uint64
Drop uint64
@@ -81,73 +84,34 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
// Parses a ConntrackStatEntry from given array of fields.
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
- if len(fields) != 17 {
- return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
- }
- entry := &ConntrackStatEntry{}
-
- entries, err := parseConntrackStatField(fields[0])
- if err != nil {
- return nil, err
- }
- entry.Entries = entries
-
- found, err := parseConntrackStatField(fields[2])
- if err != nil {
- return nil, err
- }
- entry.Found = found
-
- invalid, err := parseConntrackStatField(fields[4])
- if err != nil {
- return nil, err
- }
- entry.Invalid = invalid
-
- ignore, err := parseConntrackStatField(fields[5])
- if err != nil {
- return nil, err
- }
- entry.Ignore = ignore
-
- insert, err := parseConntrackStatField(fields[8])
+ entries, err := util.ParseHexUint64s(fields)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err)
}
- entry.Insert = insert
-
- insertFailed, err := parseConntrackStatField(fields[9])
- if err != nil {
- return nil, err
+ numEntries := len(entries)
+ if numEntries < 16 || numEntries > 17 {
+ return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries)
}
- entry.InsertFailed = insertFailed
- drop, err := parseConntrackStatField(fields[10])
- if err != nil {
- return nil, err
+ stats := &ConntrackStatEntry{
+ Entries: *entries[0],
+ Searched: *entries[1],
+ Found: *entries[2],
+ New: *entries[3],
+ Invalid: *entries[4],
+ Ignore: *entries[5],
+ Delete: *entries[6],
+ DeleteList: *entries[7],
+ Insert: *entries[8],
+ InsertFailed: *entries[9],
+ Drop: *entries[10],
+ EarlyDrop: *entries[11],
}
- entry.Drop = drop
- earlyDrop, err := parseConntrackStatField(fields[11])
- if err != nil {
- return nil, err
+ // Ignore missing search_restart on Linux < 2.6.35.
+ if numEntries == 17 {
+ stats.SearchRestart = *entries[16]
}
- entry.EarlyDrop = earlyDrop
- searchRestart, err := parseConntrackStatField(fields[16])
- if err != nil {
- return nil, err
- }
- entry.SearchRestart = searchRestart
-
- return entry, nil
-}
-
-// Parses a uint64 from given hex in string.
-func parseConntrackStatField(field string) (uint64, error) {
- val, err := strconv.ParseUint(field, 16, 64)
- if err != nil {
- return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
- }
- return val, err
+ return stats, nil
}
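
With the rewrite above, rows with 16 fields (kernels older than 2.6.35, which lack search_restart) are accepted as well. A hedged usage sketch via the exported FS.ConntrackStat accessor that wraps this parser, assuming the public github.com/prometheus/procfs import path:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	entries, err := fs.ConntrackStat()
	if err != nil {
		panic(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu %d: entries=%d searched=%d new=%d delete=%d\n",
			cpu, e.Entries, e.Searched, e.New, e.Delete)
	}
}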
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index a94f86dc4a..540cea52c6 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -27,8 +27,9 @@ import (
// For the proc file format details,
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
type SoftnetStat struct {
@@ -38,6 +39,18 @@ type SoftnetStat struct {
Dropped uint32
// Number of times processing packets ran out of quota.
TimeSqueezed uint32
+ // Number of collisions that occurred while obtaining the device lock during transmission.
+ CPUCollision uint32
+ // Number of times the CPU was woken up (received_rps).
+ ReceivedRps uint32
+ // Number of times the flow limit has been reached.
+ FlowLimitCount uint32
+ // Softnet backlog status.
+ SoftnetBacklogLen uint32
+ // CPU id owning this softnet_data.
+ Index uint32
+ // Width is the number of columns parsed from this softnet_data row.
+ Width int
}
var softNetProcFile = "net/softnet_stat"
@@ -63,25 +76,65 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
s := bufio.NewScanner(r)
var stats []SoftnetStat
+ cpuIndex := 0
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
+ softnetStat := SoftnetStat{}
if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
}
- // We only parse the first three columns at the moment.
- us, err := parseHexUint32s(columns[0:3])
- if err != nil {
- return nil, err
+ // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+ if width >= minColumns {
+ us, err := parseHexUint32s(columns[0:9])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.Processed = us[0]
+ softnetStat.Dropped = us[1]
+ softnetStat.TimeSqueezed = us[2]
+ softnetStat.CPUCollision = us[8]
+ }
+
+ // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+ if width >= 10 {
+ us, err := parseHexUint32s(columns[9:10])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.ReceivedRps = us[0]
}
- stats = append(stats, SoftnetStat{
- Processed: us[0],
- Dropped: us[1],
- TimeSqueezed: us[2],
- })
+ // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+ if width >= 11 {
+ us, err := parseHexUint32s(columns[10:11])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.FlowLimitCount = us[0]
+ }
+
+ // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+ if width >= 13 {
+ us, err := parseHexUint32s(columns[11:13])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.SoftnetBacklogLen = us[0]
+ softnetStat.Index = us[1]
+ } else {
+ // For older kernels, create the Index based on the scan line number.
+ softnetStat.Index = uint32(cpuIndex)
+ }
+ softnetStat.Width = width
+ stats = append(stats, softnetStat)
+ cpuIndex++
}
return stats, nil
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
new file mode 100644
index 0000000000..c80fb15424
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+ Name string
+
+ // Status is the current 4-digit hex value status of the interface.
+ Status uint64
+
+ // QualityLink is the link quality.
+ QualityLink int
+
+ // QualityLevel is the signal gain (dBm).
+ QualityLevel int
+
+ // QualityNoise is the signal noise baseline (dBm).
+ QualityNoise int
+
+ // DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+ DiscardedNwid int
+
+ // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+ DiscardedCrypt int
+
+ // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+ DiscardedFrag int
+
+ // DiscardedRetry is the number of discarded packets that reached max MAC retries.
+ DiscardedRetry int
+
+ // DiscardedMisc is the number of discarded packets for other reasons.
+ DiscardedMisc int
+
+ // MissedBeacon is the number of missed beacons/superframe.
+ MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
+func (fs FS) Wireless() ([]*Wireless, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+ if err != nil {
+ return nil, err
+ }
+
+ m, err := parseWireless(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse wireless: %w", err)
+ }
+
+ return m, nil
+}
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-| Quality | Discarded packets | Missed | WE
+face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
+ eth1: 0000 5. -256. -10. 0 1 0 3 0 0
+ eth2: 0000 5. -256. -20. 0 2 0 4 0 0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+ var (
+ interfaces []*Wireless
+ scanner = bufio.NewScanner(r)
+ )
+
+ for n := 0; scanner.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line := scanner.Text()
+
+ parts := strings.Split(line, ":")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line)
+ }
+
+ name := strings.TrimSpace(parts[0])
+ stats := strings.Fields(parts[1])
+
+ if len(stats) < 10 {
+ return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line)
+ }
+
+ status, err := strconv.ParseUint(stats[0], 16, 16)
+ if err != nil {
+ return nil, fmt.Errorf("invalid status in line %d: %q", n, line)
+ }
+
+ qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err)
+ }
+
+ qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err)
+ }
+
+ qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err)
+ }
+
+ dnwid, err := strconv.Atoi(stats[4])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err)
+ }
+
+ dcrypt, err := strconv.Atoi(stats[5])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err)
+ }
+
+ dfrag, err := strconv.Atoi(stats[6])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err)
+ }
+
+ dretry, err := strconv.Atoi(stats[7])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err)
+ }
+
+ dmisc, err := strconv.Atoi(stats[8])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err)
+ }
+
+ mbeacon, err := strconv.Atoi(stats[9])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err)
+ }
+
+ w := &Wireless{
+ Name: name,
+ Status: status,
+ QualityLink: qlink,
+ QualityLevel: qlevel,
+ QualityNoise: qnoise,
+ DiscardedNwid: dnwid,
+ DiscardedCrypt: dcrypt,
+ DiscardedFrag: dfrag,
+ DiscardedRetry: dretry,
+ DiscardedMisc: dmisc,
+ MissedBeacon: mbeacon,
+ }
+
+ interfaces = append(interfaces, w)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err)
+ }
+
+ return interfaces, nil
+}
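
A short usage sketch for the new wireless collector, assuming the public github.com/prometheus/procfs import path:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	ifaces, err := fs.Wireless()
	if err != nil {
		panic(err) // fails on hosts without /proc/net/wireless
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%d dBm noise=%d dBm missed beacons=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}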
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
index dcea9c5a67..742dff453b 100644
--- a/vendor/github.com/prometheus/procfs/netstat.go
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -37,32 +37,46 @@ func (fs FS) NetStat() ([]NetStat, error) {
var netStatsTotal []NetStat
for _, filePath := range statFiles {
- file, err := os.Open(filePath)
+ procNetstat, err := parseNetstat(filePath)
if err != nil {
return nil, err
}
+ procNetstat.Filename = filepath.Base(filePath)
- netStatFile := NetStat{
- Filename: filepath.Base(filePath),
- Stats: make(map[string][]uint64),
- }
- scanner := bufio.NewScanner(file)
- scanner.Scan()
- // First string is always a header for stats
- var headers []string
- headers = append(headers, strings.Fields(scanner.Text())...)
+ netStatsTotal = append(netStatsTotal, procNetstat)
+ }
+ return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(filePath string) (NetStat, error) {
+ netStat := NetStat{
+ Stats: make(map[string][]uint64),
+ }
+ file, err := os.Open(filePath)
+ if err != nil {
+ return netStat, err
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ scanner.Scan()
- // Other strings represent per-CPU counters
- for scanner.Scan() {
- for num, counter := range strings.Fields(scanner.Text()) {
- value, err := strconv.ParseUint(counter, 16, 64)
- if err != nil {
- return nil, err
- }
- netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
+ // First string is always a header for stats
+ var headers []string
+ headers = append(headers, strings.Fields(scanner.Text())...)
+
+ // Other strings represent per-CPU counters
+ for scanner.Scan() {
+ for num, counter := range strings.Fields(scanner.Text()) {
+ value, err := strconv.ParseUint(counter, 16, 64)
+ if err != nil {
+ return NetStat{}, err
}
+ netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
}
- netStatsTotal = append(netStatsTotal, netStatFile)
}
- return netStatsTotal, nil
+
+ return netStat, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index c30223af72..48f39dafd2 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -21,7 +21,6 @@ import (
"strconv"
"strings"
- "github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -30,7 +29,7 @@ type Proc struct {
// The process ID.
PID int
- fs fs.FS
+ fs FS
}
// Procs represents a list of Proc structs.
@@ -92,7 +91,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
- return Proc{PID: pid, fs: fs.proc}, nil
+ return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently available processes.
@@ -114,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil {
continue
}
- p = append(p, Proc{PID: int(pid), fs: fs.proc})
+ p = append(p, Proc{PID: int(pid), fs: fs})
}
return p, nil
@@ -237,6 +236,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// FileDescriptorsLen returns the number of currently open file descriptors of
// a process.
func (p Proc) FileDescriptorsLen() (int, error) {
+ // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+ if p.fs.real {
+ stat, err := os.Stat(p.path("fd"))
+ if err != nil {
+ return 0, err
+ }
+
+ size := stat.Size()
+ if size > 0 {
+ return int(size), nil
+ }
+ }
+
fds, err := p.fileDescriptors()
if err != nil {
return 0, err
@@ -285,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
}
func (p Proc) path(pa ...string) string {
- return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+ return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index cca03327c3..ea83a75ffc 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -23,7 +23,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 0000000000..9df79c2379
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+ // Info is the type of interrupt.
+ Info string
+ // Devices is the name of the device that is located at that IRQ.
+ Devices string
+ // Values is the number of interrupts per CPU.
+ Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+ data, err := util.ReadFileNoStat(p.path("interrupts"))
+ if err != nil {
+ return nil, err
+ }
+ return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+ var (
+ interrupts = Interrupts{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return nil, errors.New("interrupts empty")
+ }
+ cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ if len(parts) == 0 { // skip empty lines
+ continue
+ }
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts)
+ }
+ intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+ if len(parts) == 2 {
+ interrupts[intName] = Interrupt{
+ Info: "",
+ Devices: "",
+ Values: []string{
+ parts[1],
+ },
+ }
+ continue
+ }
+
+ intr := Interrupt{
+ Values: parts[1 : cpuNum+1],
+ }
+
+ if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt
+ intr.Info = parts[cpuNum+1]
+ intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+ } else {
+ intr.Info = strings.Join(parts[cpuNum+1:], " ")
+ }
+ interrupts[intName] = intr
+ }
+
+ return interrupts, scanner.Err()
+}
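
And a matching sketch for the new per-process interrupt accessor:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	proc, err := fs.Self()
	if err != nil {
		panic(err)
	}
	interrupts, err := proc.Interrupts()
	if err != nil {
		panic(err)
	}
	for irq, i := range interrupts {
		fmt.Printf("IRQ %s: info=%q devices=%q per-CPU counts=%v\n", irq, i.Info, i.Devices, i.Values)
	}
}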
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
index 48b5238194..6a43bb2459 100644
--- a/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -33,139 +33,140 @@ type ProcNetstat struct {
}
type TcpExt struct { // nolint:revive
- SyncookiesSent float64
- SyncookiesRecv float64
- SyncookiesFailed float64
- EmbryonicRsts float64
- PruneCalled float64
- RcvPruned float64
- OfoPruned float64
- OutOfWindowIcmps float64
- LockDroppedIcmps float64
- ArpFilter float64
- TW float64
- TWRecycled float64
- TWKilled float64
- PAWSActive float64
- PAWSEstab float64
- DelayedACKs float64
- DelayedACKLocked float64
- DelayedACKLost float64
- ListenOverflows float64
- ListenDrops float64
- TCPHPHits float64
- TCPPureAcks float64
- TCPHPAcks float64
- TCPRenoRecovery float64
- TCPSackRecovery float64
- TCPSACKReneging float64
- TCPSACKReorder float64
- TCPRenoReorder float64
- TCPTSReorder float64
- TCPFullUndo float64
- TCPPartialUndo float64
- TCPDSACKUndo float64
- TCPLossUndo float64
- TCPLostRetransmit float64
- TCPRenoFailures float64
- TCPSackFailures float64
- TCPLossFailures float64
- TCPFastRetrans float64
- TCPSlowStartRetrans float64
- TCPTimeouts float64
- TCPLossProbes float64
- TCPLossProbeRecovery float64
- TCPRenoRecoveryFail float64
- TCPSackRecoveryFail float64
- TCPRcvCollapsed float64
- TCPDSACKOldSent float64
- TCPDSACKOfoSent float64
- TCPDSACKRecv float64
- TCPDSACKOfoRecv float64
- TCPAbortOnData float64
- TCPAbortOnClose float64
- TCPAbortOnMemory float64
- TCPAbortOnTimeout float64
- TCPAbortOnLinger float64
- TCPAbortFailed float64
- TCPMemoryPressures float64
- TCPMemoryPressuresChrono float64
- TCPSACKDiscard float64
- TCPDSACKIgnoredOld float64
- TCPDSACKIgnoredNoUndo float64
- TCPSpuriousRTOs float64
- TCPMD5NotFound float64
- TCPMD5Unexpected float64
- TCPMD5Failure float64
- TCPSackShifted float64
- TCPSackMerged float64
- TCPSackShiftFallback float64
- TCPBacklogDrop float64
- PFMemallocDrop float64
- TCPMinTTLDrop float64
- TCPDeferAcceptDrop float64
- IPReversePathFilter float64
- TCPTimeWaitOverflow float64
- TCPReqQFullDoCookies float64
- TCPReqQFullDrop float64
- TCPRetransFail float64
- TCPRcvCoalesce float64
- TCPOFOQueue float64
- TCPOFODrop float64
- TCPOFOMerge float64
- TCPChallengeACK float64
- TCPSYNChallenge float64
- TCPFastOpenActive float64
- TCPFastOpenActiveFail float64
- TCPFastOpenPassive float64
- TCPFastOpenPassiveFail float64
- TCPFastOpenListenOverflow float64
- TCPFastOpenCookieReqd float64
- TCPFastOpenBlackhole float64
- TCPSpuriousRtxHostQueues float64
- BusyPollRxPackets float64
- TCPAutoCorking float64
- TCPFromZeroWindowAdv float64
- TCPToZeroWindowAdv float64
- TCPWantZeroWindowAdv float64
- TCPSynRetrans float64
- TCPOrigDataSent float64
- TCPHystartTrainDetect float64
- TCPHystartTrainCwnd float64
- TCPHystartDelayDetect float64
- TCPHystartDelayCwnd float64
- TCPACKSkippedSynRecv float64
- TCPACKSkippedPAWS float64
- TCPACKSkippedSeq float64
- TCPACKSkippedFinWait2 float64
- TCPACKSkippedTimeWait float64
- TCPACKSkippedChallenge float64
- TCPWinProbe float64
- TCPKeepAlive float64
- TCPMTUPFail float64
- TCPMTUPSuccess float64
- TCPWqueueTooBig float64
+ SyncookiesSent *float64
+ SyncookiesRecv *float64
+ SyncookiesFailed *float64
+ EmbryonicRsts *float64
+ PruneCalled *float64
+ RcvPruned *float64
+ OfoPruned *float64
+ OutOfWindowIcmps *float64
+ LockDroppedIcmps *float64
+ ArpFilter *float64
+ TW *float64
+ TWRecycled *float64
+ TWKilled *float64
+ PAWSActive *float64
+ PAWSEstab *float64
+ DelayedACKs *float64
+ DelayedACKLocked *float64
+ DelayedACKLost *float64
+ ListenOverflows *float64
+ ListenDrops *float64
+ TCPHPHits *float64
+ TCPPureAcks *float64
+ TCPHPAcks *float64
+ TCPRenoRecovery *float64
+ TCPSackRecovery *float64
+ TCPSACKReneging *float64
+ TCPSACKReorder *float64
+ TCPRenoReorder *float64
+ TCPTSReorder *float64
+ TCPFullUndo *float64
+ TCPPartialUndo *float64
+ TCPDSACKUndo *float64
+ TCPLossUndo *float64
+ TCPLostRetransmit *float64
+ TCPRenoFailures *float64
+ TCPSackFailures *float64
+ TCPLossFailures *float64
+ TCPFastRetrans *float64
+ TCPSlowStartRetrans *float64
+ TCPTimeouts *float64
+ TCPLossProbes *float64
+ TCPLossProbeRecovery *float64
+ TCPRenoRecoveryFail *float64
+ TCPSackRecoveryFail *float64
+ TCPRcvCollapsed *float64
+ TCPDSACKOldSent *float64
+ TCPDSACKOfoSent *float64
+ TCPDSACKRecv *float64
+ TCPDSACKOfoRecv *float64
+ TCPAbortOnData *float64
+ TCPAbortOnClose *float64
+ TCPAbortOnMemory *float64
+ TCPAbortOnTimeout *float64
+ TCPAbortOnLinger *float64
+ TCPAbortFailed *float64
+ TCPMemoryPressures *float64
+ TCPMemoryPressuresChrono *float64
+ TCPSACKDiscard *float64
+ TCPDSACKIgnoredOld *float64
+ TCPDSACKIgnoredNoUndo *float64
+ TCPSpuriousRTOs *float64
+ TCPMD5NotFound *float64
+ TCPMD5Unexpected *float64
+ TCPMD5Failure *float64
+ TCPSackShifted *float64
+ TCPSackMerged *float64
+ TCPSackShiftFallback *float64
+ TCPBacklogDrop *float64
+ PFMemallocDrop *float64
+ TCPMinTTLDrop *float64
+ TCPDeferAcceptDrop *float64
+ IPReversePathFilter *float64
+ TCPTimeWaitOverflow *float64
+ TCPReqQFullDoCookies *float64
+ TCPReqQFullDrop *float64
+ TCPRetransFail *float64
+ TCPRcvCoalesce *float64
+ TCPRcvQDrop *float64
+ TCPOFOQueue *float64
+ TCPOFODrop *float64
+ TCPOFOMerge *float64
+ TCPChallengeACK *float64
+ TCPSYNChallenge *float64
+ TCPFastOpenActive *float64
+ TCPFastOpenActiveFail *float64
+ TCPFastOpenPassive *float64
+ TCPFastOpenPassiveFail *float64
+ TCPFastOpenListenOverflow *float64
+ TCPFastOpenCookieReqd *float64
+ TCPFastOpenBlackhole *float64
+ TCPSpuriousRtxHostQueues *float64
+ BusyPollRxPackets *float64
+ TCPAutoCorking *float64
+ TCPFromZeroWindowAdv *float64
+ TCPToZeroWindowAdv *float64
+ TCPWantZeroWindowAdv *float64
+ TCPSynRetrans *float64
+ TCPOrigDataSent *float64
+ TCPHystartTrainDetect *float64
+ TCPHystartTrainCwnd *float64
+ TCPHystartDelayDetect *float64
+ TCPHystartDelayCwnd *float64
+ TCPACKSkippedSynRecv *float64
+ TCPACKSkippedPAWS *float64
+ TCPACKSkippedSeq *float64
+ TCPACKSkippedFinWait2 *float64
+ TCPACKSkippedTimeWait *float64
+ TCPACKSkippedChallenge *float64
+ TCPWinProbe *float64
+ TCPKeepAlive *float64
+ TCPMTUPFail *float64
+ TCPMTUPSuccess *float64
+ TCPWqueueTooBig *float64
}
type IpExt struct { // nolint:revive
- InNoRoutes float64
- InTruncatedPkts float64
- InMcastPkts float64
- OutMcastPkts float64
- InBcastPkts float64
- OutBcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InCsumErrors float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
- ReasmOverlaps float64
+ InNoRoutes *float64
+ InTruncatedPkts *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InBcastPkts *float64
+ OutBcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InCsumErrors *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+ ReasmOverlaps *float64
}
func (p Proc) Netstat() (ProcNetstat, error) {
@@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) {
if err != nil {
return ProcNetstat{PID: p.PID}, err
}
- procNetstat, err := parseNetstat(bytes.NewReader(data), filename)
+ procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
procNetstat.PID = p.PID
return procNetstat, err
}
-// parseNetstat parses the metrics from proc//net/netstat file
+// parseProcNetstat parses the metrics from proc//net/netstat file
// and returns a ProcNetstat structure.
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
var (
scanner = bufio.NewScanner(r)
procNetstat = ProcNetstat{}
@@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = value
+ procNetstat.TcpExt.SyncookiesSent = &value
case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = value
+ procNetstat.TcpExt.SyncookiesRecv = &value
case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = value
+ procNetstat.TcpExt.SyncookiesFailed = &value
case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = value
+ procNetstat.TcpExt.EmbryonicRsts = &value
case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = value
+ procNetstat.TcpExt.PruneCalled = &value
case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = value
+ procNetstat.TcpExt.RcvPruned = &value
case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = value
+ procNetstat.TcpExt.OfoPruned = &value
case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = value
+ procNetstat.TcpExt.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = value
+ procNetstat.TcpExt.LockDroppedIcmps = &value
case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = value
+ procNetstat.TcpExt.ArpFilter = &value
case "TW":
- procNetstat.TcpExt.TW = value
+ procNetstat.TcpExt.TW = &value
case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = value
+ procNetstat.TcpExt.TWRecycled = &value
case "TWKilled":
- procNetstat.TcpExt.TWKilled = value
+ procNetstat.TcpExt.TWKilled = &value
case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = value
+ procNetstat.TcpExt.PAWSActive = &value
case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = value
+ procNetstat.TcpExt.PAWSEstab = &value
case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = value
+ procNetstat.TcpExt.DelayedACKs = &value
case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = value
+ procNetstat.TcpExt.DelayedACKLocked = &value
case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = value
+ procNetstat.TcpExt.DelayedACKLost = &value
case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = value
+ procNetstat.TcpExt.ListenOverflows = &value
case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = value
+ procNetstat.TcpExt.ListenDrops = &value
case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = value
+ procNetstat.TcpExt.TCPHPHits = &value
case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = value
+ procNetstat.TcpExt.TCPPureAcks = &value
case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = value
+ procNetstat.TcpExt.TCPHPAcks = &value
case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = value
+ procNetstat.TcpExt.TCPRenoRecovery = &value
case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = value
+ procNetstat.TcpExt.TCPSackRecovery = &value
case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = value
+ procNetstat.TcpExt.TCPSACKReneging = &value
case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = value
+ procNetstat.TcpExt.TCPSACKReorder = &value
case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = value
+ procNetstat.TcpExt.TCPRenoReorder = &value
case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = value
+ procNetstat.TcpExt.TCPTSReorder = &value
case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = value
+ procNetstat.TcpExt.TCPFullUndo = &value
case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = value
+ procNetstat.TcpExt.TCPPartialUndo = &value
case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = value
+ procNetstat.TcpExt.TCPDSACKUndo = &value
case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = value
+ procNetstat.TcpExt.TCPLossUndo = &value
case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = value
+ procNetstat.TcpExt.TCPLostRetransmit = &value
case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = value
+ procNetstat.TcpExt.TCPRenoFailures = &value
case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = value
+ procNetstat.TcpExt.TCPSackFailures = &value
case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = value
+ procNetstat.TcpExt.TCPLossFailures = &value
case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = value
+ procNetstat.TcpExt.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = value
+ procNetstat.TcpExt.TCPSlowStartRetrans = &value
case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = value
+ procNetstat.TcpExt.TCPTimeouts = &value
case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = value
+ procNetstat.TcpExt.TCPLossProbes = &value
case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = value
+ procNetstat.TcpExt.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = value
+ procNetstat.TcpExt.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = value
+ procNetstat.TcpExt.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = value
+ procNetstat.TcpExt.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = value
+ procNetstat.TcpExt.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = value
+ procNetstat.TcpExt.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = value
+ procNetstat.TcpExt.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = value
+ procNetstat.TcpExt.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = value
+ procNetstat.TcpExt.TCPAbortOnData = &value
case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = value
+ procNetstat.TcpExt.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = value
+ procNetstat.TcpExt.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = value
+ procNetstat.TcpExt.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = value
+ procNetstat.TcpExt.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = value
+ procNetstat.TcpExt.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = value
+ procNetstat.TcpExt.TCPReqQFullDrop = &value
case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = value
+ procNetstat.TcpExt.TCPRetransFail = &value
case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = value
+ procNetstat.TcpExt.TCPRcvCoalesce = &value
+ case "TCPRcvQDrop":
+ procNetstat.TcpExt.TCPRcvQDrop = &value
case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = value
+ procNetstat.TcpExt.TCPOFOQueue = &value
case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = value
+ procNetstat.TcpExt.TCPOFODrop = &value
case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = value
+ procNetstat.TcpExt.TCPOFOMerge = &value
case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = value
+ procNetstat.TcpExt.TCPChallengeACK = &value
case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = value
+ procNetstat.TcpExt.TCPSYNChallenge = &value
case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = value
+ procNetstat.TcpExt.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = value
+ procNetstat.TcpExt.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = value
+ procNetstat.TcpExt.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = value
+ procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = value
+ procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = value
+ procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = value
+ procNetstat.TcpExt.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value
+ procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = value
+ procNetstat.TcpExt.BusyPollRxPackets = &value
case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = value
+ procNetstat.TcpExt.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = value
+ procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = value
+ procNetstat.TcpExt.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = value
+ procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = value
+ procNetstat.TcpExt.TCPSynRetrans = &value
case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = value
+ procNetstat.TcpExt.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = value
+ procNetstat.TcpExt.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = value
+ procNetstat.TcpExt.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = value
+ procNetstat.TcpExt.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = value
+ procNetstat.TcpExt.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = value
+ procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = value
+ procNetstat.TcpExt.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = value
+ procNetstat.TcpExt.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = value
+ procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = value
+ procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = value
+ procNetstat.TcpExt.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = value
+ procNetstat.TcpExt.TCPWinProbe = &value
case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = value
+ procNetstat.TcpExt.TCPKeepAlive = &value
case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = value
+ procNetstat.TcpExt.TCPMTUPFail = &value
case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = value
+ procNetstat.TcpExt.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = value
+ procNetstat.TcpExt.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = value
+ procNetstat.IpExt.InNoRoutes = &value
case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = value
+ procNetstat.IpExt.InTruncatedPkts = &value
case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = value
+ procNetstat.IpExt.InMcastPkts = &value
case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = value
+ procNetstat.IpExt.OutMcastPkts = &value
case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = value
+ procNetstat.IpExt.InBcastPkts = &value
case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = value
+ procNetstat.IpExt.OutBcastPkts = &value
case "InOctets":
- procNetstat.IpExt.InOctets = value
+ procNetstat.IpExt.InOctets = &value
case "OutOctets":
- procNetstat.IpExt.OutOctets = value
+ procNetstat.IpExt.OutOctets = &value
case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = value
+ procNetstat.IpExt.InMcastOctets = &value
case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = value
+ procNetstat.IpExt.OutMcastOctets = &value
case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = value
+ procNetstat.IpExt.InBcastOctets = &value
case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = value
+ procNetstat.IpExt.OutBcastOctets = &value
case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = value
+ procNetstat.IpExt.InCsumErrors = &value
case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = value
+ procNetstat.IpExt.InNoECTPkts = &value
case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = value
+ procNetstat.IpExt.InECT1Pkts = &value
case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = value
+ procNetstat.IpExt.InECT0Pkts = &value
case "InCEPkts":
- procNetstat.IpExt.InCEPkts = value
+ procNetstat.IpExt.InCEPkts = &value
case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = value
+ procNetstat.IpExt.ReasmOverlaps = &value
}
}
}
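
Because every TcpExt and IpExt counter is now a *float64, callers must nil-check before dereferencing; nil means the running kernel simply did not report that counter. A minimal sketch of the new calling convention:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	proc, err := fs.Self()
	if err != nil {
		panic(err)
	}
	stats, err := proc.Netstat()
	if err != nil {
		panic(err)
	}
	if v := stats.TcpExt.TCPTimeouts; v != nil {
		fmt.Println("TCPTimeouts:", *v)
	} else {
		fmt.Println("TCPTimeouts not reported by this kernel")
	}
}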
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
index ae191896cb..6c46b71884 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -37,100 +37,100 @@ type ProcSnmp struct {
}
type Ip struct { // nolint:revive
- Forwarding float64
- DefaultTTL float64
- InReceives float64
- InHdrErrors float64
- InAddrErrors float64
- ForwDatagrams float64
- InUnknownProtos float64
- InDiscards float64
- InDelivers float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
+ Forwarding *float64
+ DefaultTTL *float64
+ InReceives *float64
+ InHdrErrors *float64
+ InAddrErrors *float64
+ ForwDatagrams *float64
+ InUnknownProtos *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
}
-type Icmp struct {
- InMsgs float64
- InErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InTimeExcds float64
- InParmProbs float64
- InSrcQuenchs float64
- InRedirects float64
- InEchos float64
- InEchoReps float64
- InTimestamps float64
- InTimestampReps float64
- InAddrMasks float64
- InAddrMaskReps float64
- OutMsgs float64
- OutErrors float64
- OutDestUnreachs float64
- OutTimeExcds float64
- OutParmProbs float64
- OutSrcQuenchs float64
- OutRedirects float64
- OutEchos float64
- OutEchoReps float64
- OutTimestamps float64
- OutTimestampReps float64
- OutAddrMasks float64
- OutAddrMaskReps float64
+type Icmp struct { // nolint:revive
+ InMsgs *float64
+ InErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InTimeExcds *float64
+ InParmProbs *float64
+ InSrcQuenchs *float64
+ InRedirects *float64
+ InEchos *float64
+ InEchoReps *float64
+ InTimestamps *float64
+ InTimestampReps *float64
+ InAddrMasks *float64
+ InAddrMaskReps *float64
+ OutMsgs *float64
+ OutErrors *float64
+ OutDestUnreachs *float64
+ OutTimeExcds *float64
+ OutParmProbs *float64
+ OutSrcQuenchs *float64
+ OutRedirects *float64
+ OutEchos *float64
+ OutEchoReps *float64
+ OutTimestamps *float64
+ OutTimestampReps *float64
+ OutAddrMasks *float64
+ OutAddrMaskReps *float64
}
type IcmpMsg struct {
- InType3 float64
- OutType3 float64
+ InType3 *float64
+ OutType3 *float64
}
type Tcp struct { // nolint:revive
- RtoAlgorithm float64
- RtoMin float64
- RtoMax float64
- MaxConn float64
- ActiveOpens float64
- PassiveOpens float64
- AttemptFails float64
- EstabResets float64
- CurrEstab float64
- InSegs float64
- OutSegs float64
- RetransSegs float64
- InErrs float64
- OutRsts float64
- InCsumErrors float64
+ RtoAlgorithm *float64
+ RtoMin *float64
+ RtoMax *float64
+ MaxConn *float64
+ ActiveOpens *float64
+ PassiveOpens *float64
+ AttemptFails *float64
+ EstabResets *float64
+ CurrEstab *float64
+ InSegs *float64
+ OutSegs *float64
+ RetransSegs *float64
+ InErrs *float64
+ OutRsts *float64
+ InCsumErrors *float64
}
type Udp struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
func (p Proc) Snmp() (ProcSnmp, error) {
@@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
- procSnmp.Ip.Forwarding = value
+ procSnmp.Ip.Forwarding = &value
case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = value
+ procSnmp.Ip.DefaultTTL = &value
case "InReceives":
- procSnmp.Ip.InReceives = value
+ procSnmp.Ip.InReceives = &value
case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = value
+ procSnmp.Ip.InHdrErrors = &value
case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = value
+ procSnmp.Ip.InAddrErrors = &value
case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = value
+ procSnmp.Ip.ForwDatagrams = &value
case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = value
+ procSnmp.Ip.InUnknownProtos = &value
case "InDiscards":
- procSnmp.Ip.InDiscards = value
+ procSnmp.Ip.InDiscards = &value
case "InDelivers":
- procSnmp.Ip.InDelivers = value
+ procSnmp.Ip.InDelivers = &value
case "OutRequests":
- procSnmp.Ip.OutRequests = value
+ procSnmp.Ip.OutRequests = &value
case "OutDiscards":
- procSnmp.Ip.OutDiscards = value
+ procSnmp.Ip.OutDiscards = &value
case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = value
+ procSnmp.Ip.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = value
+ procSnmp.Ip.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = value
+ procSnmp.Ip.ReasmReqds = &value
case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = value
+ procSnmp.Ip.ReasmOKs = &value
case "ReasmFails":
- procSnmp.Ip.ReasmFails = value
+ procSnmp.Ip.ReasmFails = &value
case "FragOKs":
- procSnmp.Ip.FragOKs = value
+ procSnmp.Ip.FragOKs = &value
case "FragFails":
- procSnmp.Ip.FragFails = value
+ procSnmp.Ip.FragFails = &value
case "FragCreates":
- procSnmp.Ip.FragCreates = value
+ procSnmp.Ip.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
- procSnmp.Icmp.InMsgs = value
+ procSnmp.Icmp.InMsgs = &value
case "InErrors":
- procSnmp.Icmp.InErrors = value
+ procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
- procSnmp.Icmp.InCsumErrors = value
+ procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = value
+ procSnmp.Icmp.InDestUnreachs = &value
case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = value
+ procSnmp.Icmp.InTimeExcds = &value
case "InParmProbs":
- procSnmp.Icmp.InParmProbs = value
+ procSnmp.Icmp.InParmProbs = &value
case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = value
+ procSnmp.Icmp.InSrcQuenchs = &value
case "InRedirects":
- procSnmp.Icmp.InRedirects = value
+ procSnmp.Icmp.InRedirects = &value
case "InEchos":
- procSnmp.Icmp.InEchos = value
+ procSnmp.Icmp.InEchos = &value
case "InEchoReps":
- procSnmp.Icmp.InEchoReps = value
+ procSnmp.Icmp.InEchoReps = &value
case "InTimestamps":
- procSnmp.Icmp.InTimestamps = value
+ procSnmp.Icmp.InTimestamps = &value
case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = value
+ procSnmp.Icmp.InTimestampReps = &value
case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = value
+ procSnmp.Icmp.InAddrMasks = &value
case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = value
+ procSnmp.Icmp.InAddrMaskReps = &value
case "OutMsgs":
- procSnmp.Icmp.OutMsgs = value
+ procSnmp.Icmp.OutMsgs = &value
case "OutErrors":
- procSnmp.Icmp.OutErrors = value
+ procSnmp.Icmp.OutErrors = &value
case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = value
+ procSnmp.Icmp.OutDestUnreachs = &value
case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = value
+ procSnmp.Icmp.OutTimeExcds = &value
case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = value
+ procSnmp.Icmp.OutParmProbs = &value
case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = value
+ procSnmp.Icmp.OutSrcQuenchs = &value
case "OutRedirects":
- procSnmp.Icmp.OutRedirects = value
+ procSnmp.Icmp.OutRedirects = &value
case "OutEchos":
- procSnmp.Icmp.OutEchos = value
+ procSnmp.Icmp.OutEchos = &value
case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = value
+ procSnmp.Icmp.OutEchoReps = &value
case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = value
+ procSnmp.Icmp.OutTimestamps = &value
case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = value
+ procSnmp.Icmp.OutTimestampReps = &value
case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = value
+ procSnmp.Icmp.OutAddrMasks = &value
case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = value
+ procSnmp.Icmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
- procSnmp.IcmpMsg.InType3 = value
+ procSnmp.IcmpMsg.InType3 = &value
case "OutType3":
- procSnmp.IcmpMsg.OutType3 = value
+ procSnmp.IcmpMsg.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = value
+ procSnmp.Tcp.RtoAlgorithm = &value
case "RtoMin":
- procSnmp.Tcp.RtoMin = value
+ procSnmp.Tcp.RtoMin = &value
case "RtoMax":
- procSnmp.Tcp.RtoMax = value
+ procSnmp.Tcp.RtoMax = &value
case "MaxConn":
- procSnmp.Tcp.MaxConn = value
+ procSnmp.Tcp.MaxConn = &value
case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = value
+ procSnmp.Tcp.ActiveOpens = &value
case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = value
+ procSnmp.Tcp.PassiveOpens = &value
case "AttemptFails":
- procSnmp.Tcp.AttemptFails = value
+ procSnmp.Tcp.AttemptFails = &value
case "EstabResets":
- procSnmp.Tcp.EstabResets = value
+ procSnmp.Tcp.EstabResets = &value
case "CurrEstab":
- procSnmp.Tcp.CurrEstab = value
+ procSnmp.Tcp.CurrEstab = &value
case "InSegs":
- procSnmp.Tcp.InSegs = value
+ procSnmp.Tcp.InSegs = &value
case "OutSegs":
- procSnmp.Tcp.OutSegs = value
+ procSnmp.Tcp.OutSegs = &value
case "RetransSegs":
- procSnmp.Tcp.RetransSegs = value
+ procSnmp.Tcp.RetransSegs = &value
case "InErrs":
- procSnmp.Tcp.InErrs = value
+ procSnmp.Tcp.InErrs = &value
case "OutRsts":
- procSnmp.Tcp.OutRsts = value
+ procSnmp.Tcp.OutRsts = &value
case "InCsumErrors":
- procSnmp.Tcp.InCsumErrors = value
+ procSnmp.Tcp.InCsumErrors = &value
}
case "Udp":
switch key {
case "InDatagrams":
- procSnmp.Udp.InDatagrams = value
+ procSnmp.Udp.InDatagrams = &value
case "NoPorts":
- procSnmp.Udp.NoPorts = value
+ procSnmp.Udp.NoPorts = &value
case "InErrors":
- procSnmp.Udp.InErrors = value
+ procSnmp.Udp.InErrors = &value
case "OutDatagrams":
- procSnmp.Udp.OutDatagrams = value
+ procSnmp.Udp.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.Udp.RcvbufErrors = value
+ procSnmp.Udp.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.Udp.SndbufErrors = value
+ procSnmp.Udp.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.Udp.InCsumErrors = value
+ procSnmp.Udp.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.Udp.IgnoredMulti = value
+ procSnmp.Udp.IgnoredMulti = &value
}
case "UdpLite":
switch key {
case "InDatagrams":
- procSnmp.UdpLite.InDatagrams = value
+ procSnmp.UdpLite.InDatagrams = &value
case "NoPorts":
- procSnmp.UdpLite.NoPorts = value
+ procSnmp.UdpLite.NoPorts = &value
case "InErrors":
- procSnmp.UdpLite.InErrors = value
+ procSnmp.UdpLite.InErrors = &value
case "OutDatagrams":
- procSnmp.UdpLite.OutDatagrams = value
+ procSnmp.UdpLite.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.UdpLite.RcvbufErrors = value
+ procSnmp.UdpLite.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.UdpLite.SndbufErrors = value
+ procSnmp.UdpLite.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.UdpLite.InCsumErrors = value
+ procSnmp.UdpLite.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.UdpLite.IgnoredMulti = value
+ procSnmp.UdpLite.IgnoredMulti = &value
}
}
}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
index f611992d52..3059cc6a13 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -36,106 +36,106 @@ type ProcSnmp6 struct {
}
type Ip6 struct { // nolint:revive
- InReceives float64
- InHdrErrors float64
- InTooBigErrors float64
- InNoRoutes float64
- InAddrErrors float64
- InUnknownProtos float64
- InTruncatedPkts float64
- InDiscards float64
- InDelivers float64
- OutForwDatagrams float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
- InMcastPkts float64
- OutMcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
+ InReceives *float64
+ InHdrErrors *float64
+ InTooBigErrors *float64
+ InNoRoutes *float64
+ InAddrErrors *float64
+ InUnknownProtos *float64
+ InTruncatedPkts *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutForwDatagrams *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
}
type Icmp6 struct {
- InMsgs float64
- InErrors float64
- OutMsgs float64
- OutErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InPktTooBigs float64
- InTimeExcds float64
- InParmProblems float64
- InEchos float64
- InEchoReplies float64
- InGroupMembQueries float64
- InGroupMembResponses float64
- InGroupMembReductions float64
- InRouterSolicits float64
- InRouterAdvertisements float64
- InNeighborSolicits float64
- InNeighborAdvertisements float64
- InRedirects float64
- InMLDv2Reports float64
- OutDestUnreachs float64
- OutPktTooBigs float64
- OutTimeExcds float64
- OutParmProblems float64
- OutEchos float64
- OutEchoReplies float64
- OutGroupMembQueries float64
- OutGroupMembResponses float64
- OutGroupMembReductions float64
- OutRouterSolicits float64
- OutRouterAdvertisements float64
- OutNeighborSolicits float64
- OutNeighborAdvertisements float64
- OutRedirects float64
- OutMLDv2Reports float64
- InType1 float64
- InType134 float64
- InType135 float64
- InType136 float64
- InType143 float64
- OutType133 float64
- OutType135 float64
- OutType136 float64
- OutType143 float64
+ InMsgs *float64
+ InErrors *float64
+ OutMsgs *float64
+ OutErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InPktTooBigs *float64
+ InTimeExcds *float64
+ InParmProblems *float64
+ InEchos *float64
+ InEchoReplies *float64
+ InGroupMembQueries *float64
+ InGroupMembResponses *float64
+ InGroupMembReductions *float64
+ InRouterSolicits *float64
+ InRouterAdvertisements *float64
+ InNeighborSolicits *float64
+ InNeighborAdvertisements *float64
+ InRedirects *float64
+ InMLDv2Reports *float64
+ OutDestUnreachs *float64
+ OutPktTooBigs *float64
+ OutTimeExcds *float64
+ OutParmProblems *float64
+ OutEchos *float64
+ OutEchoReplies *float64
+ OutGroupMembQueries *float64
+ OutGroupMembResponses *float64
+ OutGroupMembReductions *float64
+ OutRouterSolicits *float64
+ OutRouterAdvertisements *float64
+ OutNeighborSolicits *float64
+ OutNeighborAdvertisements *float64
+ OutRedirects *float64
+ OutMLDv2Reports *float64
+ InType1 *float64
+ InType134 *float64
+ InType135 *float64
+ InType136 *float64
+ InType143 *float64
+ OutType133 *float64
+ OutType135 *float64
+ OutType136 *float64
+ OutType143 *float64
}
type Udp6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
}
func (p Proc) Snmp6() (ProcSnmp6, error) {
@@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
- procSnmp6.Ip6.InReceives = value
+ procSnmp6.Ip6.InReceives = &value
case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = value
+ procSnmp6.Ip6.InHdrErrors = &value
case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = value
+ procSnmp6.Ip6.InTooBigErrors = &value
case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = value
+ procSnmp6.Ip6.InNoRoutes = &value
case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = value
+ procSnmp6.Ip6.InAddrErrors = &value
case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = value
+ procSnmp6.Ip6.InUnknownProtos = &value
case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = value
+ procSnmp6.Ip6.InTruncatedPkts = &value
case "InDiscards":
- procSnmp6.Ip6.InDiscards = value
+ procSnmp6.Ip6.InDiscards = &value
case "InDelivers":
- procSnmp6.Ip6.InDelivers = value
+ procSnmp6.Ip6.InDelivers = &value
case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = value
+ procSnmp6.Ip6.OutForwDatagrams = &value
case "OutRequests":
- procSnmp6.Ip6.OutRequests = value
+ procSnmp6.Ip6.OutRequests = &value
case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = value
+ procSnmp6.Ip6.OutDiscards = &value
case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = value
+ procSnmp6.Ip6.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = value
+ procSnmp6.Ip6.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = value
+ procSnmp6.Ip6.ReasmReqds = &value
case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = value
+ procSnmp6.Ip6.ReasmOKs = &value
case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = value
+ procSnmp6.Ip6.ReasmFails = &value
case "FragOKs":
- procSnmp6.Ip6.FragOKs = value
+ procSnmp6.Ip6.FragOKs = &value
case "FragFails":
- procSnmp6.Ip6.FragFails = value
+ procSnmp6.Ip6.FragFails = &value
case "FragCreates":
- procSnmp6.Ip6.FragCreates = value
+ procSnmp6.Ip6.FragCreates = &value
case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = value
+ procSnmp6.Ip6.InMcastPkts = &value
case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = value
+ procSnmp6.Ip6.OutMcastPkts = &value
case "InOctets":
- procSnmp6.Ip6.InOctets = value
+ procSnmp6.Ip6.InOctets = &value
case "OutOctets":
- procSnmp6.Ip6.OutOctets = value
+ procSnmp6.Ip6.OutOctets = &value
case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = value
+ procSnmp6.Ip6.InMcastOctets = &value
case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = value
+ procSnmp6.Ip6.OutMcastOctets = &value
case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = value
+ procSnmp6.Ip6.InBcastOctets = &value
case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = value
+ procSnmp6.Ip6.OutBcastOctets = &value
case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = value
+ procSnmp6.Ip6.InNoECTPkts = &value
case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = value
+ procSnmp6.Ip6.InECT1Pkts = &value
case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = value
+ procSnmp6.Ip6.InECT0Pkts = &value
case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = value
+ procSnmp6.Ip6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
- procSnmp6.Icmp6.InMsgs = value
+ procSnmp6.Icmp6.InMsgs = &value
case "InErrors":
- procSnmp6.Icmp6.InErrors = value
+ procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = value
+ procSnmp6.Icmp6.OutMsgs = &value
case "OutErrors":
- procSnmp6.Icmp6.OutErrors = value
+ procSnmp6.Icmp6.OutErrors = &value
case "InCsumErrors":
- procSnmp6.Icmp6.InCsumErrors = value
+ procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = value
+ procSnmp6.Icmp6.InDestUnreachs = &value
case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = value
+ procSnmp6.Icmp6.InPktTooBigs = &value
case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = value
+ procSnmp6.Icmp6.InTimeExcds = &value
case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = value
+ procSnmp6.Icmp6.InParmProblems = &value
case "InEchos":
- procSnmp6.Icmp6.InEchos = value
+ procSnmp6.Icmp6.InEchos = &value
case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = value
+ procSnmp6.Icmp6.InEchoReplies = &value
case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = value
+ procSnmp6.Icmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = value
+ procSnmp6.Icmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = value
+ procSnmp6.Icmp6.InGroupMembReductions = &value
case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = value
+ procSnmp6.Icmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = value
+ procSnmp6.Icmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = value
+ procSnmp6.Icmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = value
+ procSnmp6.Icmp6.InNeighborAdvertisements = &value
case "InRedirects":
- procSnmp6.Icmp6.InRedirects = value
+ procSnmp6.Icmp6.InRedirects = &value
case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = value
+ procSnmp6.Icmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = value
+ procSnmp6.Icmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = value
+ procSnmp6.Icmp6.OutPktTooBigs = &value
case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = value
+ procSnmp6.Icmp6.OutTimeExcds = &value
case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = value
+ procSnmp6.Icmp6.OutParmProblems = &value
case "OutEchos":
- procSnmp6.Icmp6.OutEchos = value
+ procSnmp6.Icmp6.OutEchos = &value
case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = value
+ procSnmp6.Icmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = value
+ procSnmp6.Icmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = value
+ procSnmp6.Icmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = value
+ procSnmp6.Icmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = value
+ procSnmp6.Icmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = value
+ procSnmp6.Icmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = value
+ procSnmp6.Icmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = value
+ procSnmp6.Icmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = value
+ procSnmp6.Icmp6.OutRedirects = &value
case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = value
+ procSnmp6.Icmp6.OutMLDv2Reports = &value
case "InType1":
- procSnmp6.Icmp6.InType1 = value
+ procSnmp6.Icmp6.InType1 = &value
case "InType134":
- procSnmp6.Icmp6.InType134 = value
+ procSnmp6.Icmp6.InType134 = &value
case "InType135":
- procSnmp6.Icmp6.InType135 = value
+ procSnmp6.Icmp6.InType135 = &value
case "InType136":
- procSnmp6.Icmp6.InType136 = value
+ procSnmp6.Icmp6.InType136 = &value
case "InType143":
- procSnmp6.Icmp6.InType143 = value
+ procSnmp6.Icmp6.InType143 = &value
case "OutType133":
- procSnmp6.Icmp6.OutType133 = value
+ procSnmp6.Icmp6.OutType133 = &value
case "OutType135":
- procSnmp6.Icmp6.OutType135 = value
+ procSnmp6.Icmp6.OutType135 = &value
case "OutType136":
- procSnmp6.Icmp6.OutType136 = value
+ procSnmp6.Icmp6.OutType136 = &value
case "OutType143":
- procSnmp6.Icmp6.OutType143 = value
+ procSnmp6.Icmp6.OutType143 = &value
}
case "Udp6":
switch key {
case "InDatagrams":
- procSnmp6.Udp6.InDatagrams = value
+ procSnmp6.Udp6.InDatagrams = &value
case "NoPorts":
- procSnmp6.Udp6.NoPorts = value
+ procSnmp6.Udp6.NoPorts = &value
case "InErrors":
- procSnmp6.Udp6.InErrors = value
+ procSnmp6.Udp6.InErrors = &value
case "OutDatagrams":
- procSnmp6.Udp6.OutDatagrams = value
+ procSnmp6.Udp6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.Udp6.RcvbufErrors = value
+ procSnmp6.Udp6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.Udp6.SndbufErrors = value
+ procSnmp6.Udp6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.Udp6.InCsumErrors = value
+ procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = value
+ procSnmp6.Udp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
case "InDatagrams":
- procSnmp6.UdpLite6.InDatagrams = value
+ procSnmp6.UdpLite6.InDatagrams = &value
case "NoPorts":
- procSnmp6.UdpLite6.NoPorts = value
+ procSnmp6.UdpLite6.NoPorts = &value
case "InErrors":
- procSnmp6.UdpLite6.InErrors = value
+ procSnmp6.UdpLite6.InErrors = &value
case "OutDatagrams":
- procSnmp6.UdpLite6.OutDatagrams = value
+ procSnmp6.UdpLite6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.UdpLite6.RcvbufErrors = value
+ procSnmp6.UdpLite6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.UdpLite6.SndbufErrors = value
+ procSnmp6.UdpLite6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.UdpLite6.InCsumErrors = value
+ procSnmp6.UdpLite6.InCsumErrors = &value
}
}
}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 06c556ef96..14b249f4fc 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -18,7 +18,6 @@ import (
"fmt"
"os"
- "github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -102,6 +101,8 @@ type ProcStat struct {
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
+ // CPU number last executed on.
+ Processor uint
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
@@ -110,7 +111,7 @@ type ProcStat struct {
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
- proc fs.FS
+ proc FS
}
// NewStat returns the current status information of the process.
@@ -184,7 +185,7 @@ func (p Proc) Stat() (ProcStat, error) {
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
- &ignoreInt64,
+ &s.Processor,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
@@ -208,8 +209,7 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
- fs := FS{proc: s.proc}
- stat, err := fs.Stat()
+ stat, err := s.proc.Stat()
if err != nil {
return 0, err
}
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index 594022ded4..c055d075db 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -15,6 +15,7 @@ package procfs
import (
"bytes"
+ "sort"
"strconv"
"strings"
@@ -76,6 +77,9 @@ type ProcStatus struct {
UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string
+
+ // CpusAllowedList: List of cpu cores processes are allowed to run on.
+ CpusAllowedList []uint64
}
// NewStatus returns the current status information of the process.
@@ -96,10 +100,10 @@ func (p Proc) NewStatus() (ProcStatus, error) {
kv := strings.SplitN(line, ":", 2)
// removes spaces
- k := string(strings.TrimSpace(kv[0]))
- v := string(strings.TrimSpace(kv[1]))
+ k := strings.TrimSpace(kv[0])
+ v := strings.TrimSpace(kv[1])
// removes "kB"
- v = string(bytes.Trim([]byte(v), " kB"))
+ v = strings.TrimSuffix(v, " kB")
// value to int when possible
// we can skip error check here, 'cause vKBytes is not used when value is a string
@@ -161,10 +165,38 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint
+ case "Cpus_allowed_list":
+ s.CpusAllowedList = calcCpusAllowedList(vString)
}
+
}
// TotalCtxtSwitches returns the total context switch.
func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
}
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+ s := strings.Split(cpuString, ",")
+
+ var g []uint64
+
+ for _, cpu := range s {
+ // parse cpu ranges, example: 1-3=[1,2,3]
+ if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+ startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+ endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+ for i := startCPU; i <= endCPU; i++ {
+ g = append(g, i)
+ }
+ } else if len(l) == 1 {
+ cpu, _ := strconv.ParseUint(l[0], 10, 64)
+ g = append(g, cpu)
+ }
+
+ }
+
+ sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+ return g
+}
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 33f97caa08..586af48af9 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -62,7 +62,7 @@ type Stat struct {
// Summed up cpu statistics.
CPUTotal CPUStat
// Per-CPU statistics.
- CPU []CPUStat
+ CPU map[int64]CPUStat
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
IRQTotal uint64
// Number of times a numbered IRQ was triggered.
@@ -170,10 +170,23 @@ func (fs FS) Stat() (Stat, error) {
if err != nil {
return Stat{}, err
}
+ procStat, err := parseStat(bytes.NewReader(data), fileName)
+ if err != nil {
+ return Stat{}, err
+ }
+ return procStat, nil
+}
- stat := Stat{}
+// parseStat parses the metrics from /proc/[pid]/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ stat = Stat{
+ CPU: make(map[int64]CPUStat),
+ }
+ err error
+ )
- scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@@ -228,9 +241,6 @@ func (fs FS) Stat() (Stat, error) {
if cpuID == -1 {
stat.CPUTotal = cpuStat
} else {
- for int64(len(stat.CPU)) <= cpuID {
- stat.CPU = append(stat.CPU, CPUStat{})
- }
stat.CPU[cpuID] = cpuStat
}
}
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 0000000000..490c14708d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ d, err := os.Open(taskPath)
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ }
+
+ t := Procs{}
+ for _, n := range names {
+ tid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+
+ t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}})
+ }
+
+ return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ if _, err := os.Stat(taskPath); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+ tfs := FS{fsi.FS(proc.path("task")), proc.fs.real}
+ if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: tfs}, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
index 20ceb77e2d..cdedcae996 100644
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -26,7 +26,9 @@ import (
)
// The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
// and numa_zonelist_order (deprecated) which is a string.
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
new file mode 100644
index 0000000000..6a66aea5ea
--- /dev/null
+++ b/vendor/golang.org/x/exp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
new file mode 100644
index 0000000000..733099041f
--- /dev/null
+++ b/vendor/golang.org/x/exp/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 0000000000..2c033dff47
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 0000000000..8a237c5d61
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+// Unless otherwise specified, these functions all apply to the elements
+// of a slice at index 0 <= i < len(s).
+//
+// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
+// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
+// or the sorting may fail to sort correctly. A common case is when sorting slices of
+// floating-point numbers containing NaN values.
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[E comparable](s1, s2 []E) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using a comparison
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2.
+// The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+// Comparisons involving floating point NaNs are ignored.
+func Compare[E constraints.Ordered](s1, s2 []E) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ switch {
+ case v1 < v2:
+ return -1
+ case v1 > v2:
+ return +1
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like Compare but uses a comparison function
+// on each pair of elements. The elements are compared in increasing
+// index order, and the comparisons stop after the first time cmp
+// returns non-zero.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[E comparable](s []E, v E) int {
+ for i, vs := range s {
+ if v == vs {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[E any](s []E, f func(E) bool) int {
+ for i, v := range s {
+ if f(v) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[E comparable](s []E, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// In the returned slice r, r[i] == v[0].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ tot := len(s) + len(v)
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[i:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[i:])
+ return s2
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete modifies the contents of the slice s; it does not create a new slice.
+// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ return append(s[:i], s[j:]...)
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s; it does not create a new slice.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) == 0 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if v != last {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// CompactFunc is like Compact but uses a comparison function.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) == 0 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if !eq(v, last) {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. Grow may modify elements of the
+// slice between the length and the capacity. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ return append(s, make(S, n)...)[:len(s)]
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 0000000000..c22e74bd10
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,127 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// Sort may fail to sort correctly when sorting slices of floating-point
+// numbers containing Not-a-number (NaN) values.
+// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
+// instead if the input may contain NaNs.
+func Sort[E constraints.Ordered](x []E) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the less function.
+// This sort is not guaranteed to be stable.
+//
+// SortFunc requires that less is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[E any](x []E, less func(a, b E) bool) {
+ n := len(x)
+ pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using less to compare elements.
+func SortStableFunc[E any](x []E, less func(a, b E) bool) {
+ stableLessFunc(x, len(x), less)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[E constraints.Ordered](x []E) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if x[i] < x[i-1] {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with less as the
+// comparison function.
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if less(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
+ // search returns the leftmost position where f returns true, or len(x) if f
+ // returns false for all x. This is the insertion position for target in x,
+ // and could point to an element that's either == target or not.
+ pos := search(len(x), func(i int) bool { return x[i] >= target })
+ if pos >= len(x) || x[pos] != target {
+ return pos, false
+ } else {
+ return pos, true
+ }
+}
+
+// BinarySearchFunc works like BinarySearch, but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing" is
+// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
+// parameters: 0 if a == b, a negative number if a < b and a positive number if
+// a > b.
+func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) {
+ pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 })
+ if pos >= len(x) || cmp(x[pos], target) != 0 {
+ return pos, false
+ } else {
+ return pos, true
+ }
+}
+
+func search(n int, f func(int) bool) int {
+ // Define f(-1) == false and f(n) == true.
+ // Invariant: f(i-1) == false, f(j) == true.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if !f(h) {
+ i = h + 1 // preserves f(i-1) == false
+ } else {
+ j = h // preserves f(j) == true
+ }
+ }
+ // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+ return i
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go
new file mode 100644
index 0000000000..2a632476c5
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortLessFunc sorts data[a:b] using insertion sort.
+func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && less(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownLessFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && less(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !less(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownLessFunc(data, i, hi, first, less)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownLessFunc(data, lo, i, first, less)
+ }
+}
+
+// pdqsortLessFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsLessFunc(data, a, b, less)
+ limit--
+ }
+
+ pivot, hint := choosePivotLessFunc(data, a, b, less)
+ if hint == decreasingHint {
+ reverseRangeLessFunc(data, a, b, less)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortLessFunc(data, a, b, less) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !less(data[a-1], data[pivot]) {
+ mid := partitionEqualLessFunc(data, a, b, pivot, less)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortLessFunc(data, a, mid, limit, less)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortLessFunc(data, mid+1, b, limit, less)
+ b = mid
+ }
+ }
+}
+
+// partitionLessFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !less(data[a], data[i]) {
+ i++
+ }
+ for i <= j && less(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !less(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotLessFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentLessFunc(data, i, &swaps, less)
+ j = medianAdjacentLessFunc(data, j, &swaps, less)
+ k = medianAdjacentLessFunc(data, k, &swaps, less)
+ }
+ // Find the median among i, j, k and store it into j.
+ j = medianLessFunc(data, i, j, k, &swaps, less)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
+ if less(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ b, c = order2LessFunc(data, b, c, swaps, less)
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ return b
+}
+
+// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
+ return medianLessFunc(data, a-1, a, a+1, swaps, less)
+}
+
+func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortLessFunc(data, a, b, less)
+ a = b
+ b += blockSize
+ }
+ insertionSortLessFunc(data, a, n, less)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeLessFunc(data, a, a+blockSize, b, less)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeLessFunc(data, a, m, n, less)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if less(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !less(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !less(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateLessFunc(data, start, m, end, less)
+ }
+ if a < start && start < mid {
+ symMergeLessFunc(data, a, start, mid, less)
+ }
+ if mid < end && end < b {
+ symMergeLessFunc(data, mid, end, b, less)
+ }
+}
+
+// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeLessFunc(data, m-i, m, j, less)
+ i -= j
+ } else {
+ swapRangeLessFunc(data, m-i, m+j-i, i, less)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeLessFunc(data, m-i, m, i, less)
+}
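
For orientation, here is a minimal usage sketch of the public entry points that dispatch into the generated *LessFunc variants above. It assumes the less-based SortFunc/SortStableFunc signatures present in this vendored snapshot of golang.org/x/exp/slices; the user type and data are illustrative.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	users := []user{{"carol", 35}, {"alice", 30}, {"bob", 30}}
	byAge := func(a, b user) bool { return a.age < b.age }

	// SortFunc drives the pdqsortLessFunc path; equal elements may be reordered.
	unstable := slices.Clone(users)
	slices.SortFunc(unstable, byAge)

	// SortStableFunc drives stableLessFunc/symMergeLessFunc and keeps equal
	// elements (alice and bob, both 30) in their original relative order.
	stable := slices.Clone(users)
	slices.SortStableFunc(stable, byAge)

	fmt.Println(stable) // [{alice 30} {bob 30} {carol 35}]
}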
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 0000000000..efaa1c8b71
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j] < data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (data[first+child] < data[first+child+1]) {
+ child++
+ }
+ if !(data[first+root] < data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(data[a-1] < data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(data[a] < data[i]) {
+ i++
+ }
+ for i <= j && (data[a] < data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(data[i] < data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if data[b] < data[a] {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and returns its index.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data[h] < data[a] {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(data[m] < data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(data[p-c] < data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
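
A companion sketch for the Ordered variants: slices.Sort and slices.IsSorted from the same vendored package operate on constraints.Ordered element types and compare with "<" directly, falling back from pdqsortOrdered to heapSortOrdered only after too many unbalanced partitions. The slice contents here are illustrative.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []int{5, 2, 4, 2, 1}

	// Sort dispatches to pdqsortOrdered for ordered element types.
	slices.Sort(xs)
	fmt.Println(xs) // [1 2 2 4 5]

	// IsSorted verifies the result; BinarySearch then works on the sorted slice.
	fmt.Println(slices.IsSorted(xs)) // true
	i, found := slices.BinarySearch(xs, 4)
	fmt.Println(i, found) // 3 true
}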
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
new file mode 100644
index 0000000000..b18efb743f
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -0,0 +1,132 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errgroup provides synchronization, error propagation, and Context
+// cancelation for groups of goroutines working on subtasks of a common task.
+package errgroup
+
+import (
+ "context"
+ "fmt"
+ "sync"
+)
+
+type token struct{}
+
+// A Group is a collection of goroutines working on subtasks that are part of
+// the same overall task.
+//
+// A zero Group is valid, has no limit on the number of active goroutines,
+// and does not cancel on error.
+type Group struct {
+ cancel func(error)
+
+ wg sync.WaitGroup
+
+ sem chan token
+
+ errOnce sync.Once
+ err error
+}
+
+func (g *Group) done() {
+ if g.sem != nil {
+ <-g.sem
+ }
+ g.wg.Done()
+}
+
+// WithContext returns a new Group and an associated Context derived from ctx.
+//
+// The derived Context is canceled the first time a function passed to Go
+// returns a non-nil error or the first time Wait returns, whichever occurs
+// first.
+func WithContext(ctx context.Context) (*Group, context.Context) {
+ ctx, cancel := withCancelCause(ctx)
+ return &Group{cancel: cancel}, ctx
+}
+
+// Wait blocks until all function calls from the Go method have returned, then
+// returns the first non-nil error (if any) from them.
+func (g *Group) Wait() error {
+ g.wg.Wait()
+ if g.cancel != nil {
+ g.cancel(g.err)
+ }
+ return g.err
+}
+
+// Go calls the given function in a new goroutine.
+// It blocks until the new goroutine can be added without the number of
+// active goroutines in the group exceeding the configured limit.
+//
+// The first call to return a non-nil error cancels the group's context, if the
+// group was created by calling WithContext. The error will be returned by Wait.
+func (g *Group) Go(f func() error) {
+ if g.sem != nil {
+ g.sem <- token{}
+ }
+
+ g.wg.Add(1)
+ go func() {
+ defer g.done()
+
+ if err := f(); err != nil {
+ g.errOnce.Do(func() {
+ g.err = err
+ if g.cancel != nil {
+ g.cancel(g.err)
+ }
+ })
+ }
+ }()
+}
+
+// TryGo calls the given function in a new goroutine only if the number of
+// active goroutines in the group is currently below the configured limit.
+//
+// The return value reports whether the goroutine was started.
+func (g *Group) TryGo(f func() error) bool {
+ if g.sem != nil {
+ select {
+ case g.sem <- token{}:
+ // Note: this allows barging iff channels in general allow barging.
+ default:
+ return false
+ }
+ }
+
+ g.wg.Add(1)
+ go func() {
+ defer g.done()
+
+ if err := f(); err != nil {
+ g.errOnce.Do(func() {
+ g.err = err
+ if g.cancel != nil {
+ g.cancel(g.err)
+ }
+ })
+ }
+ }()
+ return true
+}
+
+// SetLimit limits the number of active goroutines in this group to at most n.
+// A negative value indicates no limit.
+//
+// Any subsequent call to the Go method will block until it can add an active
+// goroutine without exceeding the configured limit.
+//
+// The limit must not be modified while any goroutines in the group are active.
+func (g *Group) SetLimit(n int) {
+ if n < 0 {
+ g.sem = nil
+ return
+ }
+ if len(g.sem) != 0 {
+ panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem)))
+ }
+ g.sem = make(chan token, n)
+}
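
A short usage sketch of the Group API defined above (WithContext, SetLimit, Go, Wait). The URLs are placeholders; the point is that the first non-nil error cancels ctx for the remaining goroutines and is later returned by Wait.

package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(2) // at most two fetches in flight at a time

	urls := []string{
		"https://example.com/",
		"https://example.org/",
		"https://example.net/",
	}
	for _, url := range urls {
		url := url // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err // first error cancels ctx for the other goroutines
			}
			resp.Body.Close()
			fmt.Println(url, resp.Status)
			return nil
		})
	}
	// Wait returns the first non-nil error, after all goroutines finish.
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}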
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go
new file mode 100644
index 0000000000..f93c740b63
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/go120.go
@@ -0,0 +1,13 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+
+package errgroup
+
+import "context"
+
+func withCancelCause(parent context.Context) (context.Context, func(error)) {
+ return context.WithCancelCause(parent)
+}
diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go
new file mode 100644
index 0000000000..88ce33434e
--- /dev/null
+++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.20
+
+package errgroup
+
+import "context"
+
+func withCancelCause(parent context.Context) (context.Context, func(error)) {
+ ctx, cancel := context.WithCancel(parent)
+ return ctx, func(error) { cancel() }
+}
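
The two withCancelCause implementations above differ only in whether the group's first error is recorded as the context's cancellation cause. A small sketch of the observable behavior on Go 1.20 or newer (context.Cause does not exist before 1.20, where the fallback simply cancels):

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())

	errBoom := errors.New("boom")
	g.Go(func() error { return errBoom })

	fmt.Println(g.Wait()) // boom

	// With the go1.20 build of withCancelCause, the first error becomes the
	// context's cancellation cause; the pre-go1.20 fallback discards it and
	// only cancels the context.
	fmt.Println(context.Cause(ctx)) // boom
	fmt.Println(ctx.Err())          // context canceled
}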
diff --git a/vendor/golang.org/x/text/feature/plural/common.go b/vendor/golang.org/x/text/feature/plural/common.go
new file mode 100644
index 0000000000..fdcb373fdf
--- /dev/null
+++ b/vendor/golang.org/x/text/feature/plural/common.go
@@ -0,0 +1,70 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package plural
+
+// Form defines a plural form.
+//
+// Not all languages support all forms. Also, the meaning of each form varies
+// per language. It is important to note that the name of a form does not
+// necessarily correspond one-to-one with the set of numbers. For instance,
+// for Croatian, One matches not only 1, but also 11, 21, etc.
+//
+// Each language must at least support the form "other".
+type Form byte
+
+const (
+ Other Form = iota
+ Zero
+ One
+ Two
+ Few
+ Many
+)
+
+var countMap = map[string]Form{
+ "other": Other,
+ "zero": Zero,
+ "one": One,
+ "two": Two,
+ "few": Few,
+ "many": Many,
+}
+
+type pluralCheck struct {
+ // category:
+ // 3..7: opID
+ // 0..2: category
+ cat byte
+ setID byte
+}
+
+// opID identifies the type of operand in the plural rule, being i, n or f.
+// (v, w, and t are treated as filters in our implementation.)
+type opID byte
+
+const (
+ opMod opID = 0x1 // is '%' used?
+ opNotEqual opID = 0x2 // using "!=" to compare
+ opI opID = 0 << 2 // integers after taking the absolute value
+ opN opID = 1 << 2 // full number (must be integer)
+ opF opID = 2 << 2 // fraction
+ opV opID = 3 << 2 // number of visible digits
+ opW opID = 4 << 2 // number of visible digits without trailing zeros
+ opBretonM opID = 5 << 2 // hard-wired rule for Breton
+ opItalian800 opID = 6 << 2 // hard-wired rule for Italian
+ opAzerbaijan00s opID = 7 << 2 // hard-wired rule for Azerbaijan
+)
+const (
+ // Use this plural form to indicate the next rule needs to match as well.
+ // The last condition in the list will have the correct plural form.
+ andNext = 0x7
+ formMask = 0x7
+
+ opShift = 3
+
+ // numN indicates the maximum integer, or maximum mod value, for which we
+ // have inclusion masks.
+ numN = 100
+ // The common denominator of the modulo that is taken.
+ maxMod = 100
+)
diff --git a/vendor/golang.org/x/text/feature/plural/message.go b/vendor/golang.org/x/text/feature/plural/message.go
new file mode 100644
index 0000000000..56d518cc34
--- /dev/null
+++ b/vendor/golang.org/x/text/feature/plural/message.go
@@ -0,0 +1,244 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package plural
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+
+ "golang.org/x/text/internal/catmsg"
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+ "golang.org/x/text/message/catalog"
+)
+
+// TODO: consider deleting this interface. Maybe VisibleDigits is always
+// sufficient and practical.
+
+// Interface is used for types that can determine their own plural form.
+type Interface interface {
+ // PluralForm reports the plural form for the given language of the
+ // underlying value. It also returns the integer value. If the integer value
+ // is larger than fits in n, PluralForm may return a value modulo
+ // 10,000,000.
+ PluralForm(t language.Tag, scale int) (f Form, n int)
+}
+
+// Selectf returns the first case for which its selector is a match for the
+// arg-th substitution argument to a formatting call, formatting it as indicated
+// by format.
+//
+// The cases argument consists of pairs of selectors and messages. Selectors are of type
+// string or Form. Messages are of type string or catalog.Message. A selector
+// matches an argument if:
+// - it is "other" or Other
+// - it matches the plural form of the argument: "zero", "one", "two", "few",
+// or "many", or the equivalent Form
+// - it is of the form "=x" where x is an integer that matches the value of
+// the argument.
+// - it is of the form "<x" where x is an integer that is larger than the
+// argument.
+ if m.kind > kindDefault {
+ e.EncodeUint(uint64(m.scale))
+ }
+
+ forms := validForms(cardinal, e.Language())
+
+ for i := 0; i < len(m.cases); {
+ if err := compileSelector(e, forms, m.cases[i]); err != nil {
+ return err
+ }
+ if i++; i >= len(m.cases) {
+ return fmt.Errorf("plural: no message defined for selector %v", m.cases[i-1])
+ }
+ var msg catalog.Message
+ switch x := m.cases[i].(type) {
+ case string:
+ msg = catalog.String(x)
+ case catalog.Message:
+ msg = x
+ default:
+ return fmt.Errorf("plural: message of type %T; must be string or catalog.Message", x)
+ }
+ if err := e.EncodeMessage(msg); err != nil {
+ return err
+ }
+ i++
+ }
+ return nil
+}
+
+func compileSelector(e *catmsg.Encoder, valid []Form, selector interface{}) error {
+ form := Other
+ switch x := selector.(type) {
+ case string:
+ if x == "" {
+ return fmt.Errorf("plural: empty selector")
+ }
+ if c := x[0]; c == '=' || c == '<' {
+ val, err := strconv.ParseUint(x[1:], 10, 16)
+ if err != nil {
+ return fmt.Errorf("plural: invalid number in selector %q: %v", selector, err)
+ }
+ e.EncodeUint(uint64(c))
+ e.EncodeUint(val)
+ return nil
+ }
+ var ok bool
+ form, ok = countMap[x]
+ if !ok {
+ return fmt.Errorf("plural: invalid plural form %q", selector)
+ }
+ case Form:
+ form = x
+ default:
+ return fmt.Errorf("plural: selector of type %T; want string or Form", selector)
+ }
+
+ ok := false
+ for _, f := range valid {
+ if f == form {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("plural: form %q not supported for language %q", selector, e.Language())
+ }
+ e.EncodeUint(uint64(form))
+ return nil
+}
+
+func execute(d *catmsg.Decoder) bool {
+ lang := d.Language()
+ argN := int(d.DecodeUint())
+ kind := int(d.DecodeUint())
+ scale := -1 // default
+ if kind > kindDefault {
+ scale = int(d.DecodeUint())
+ }
+ form := Other
+ n := -1
+ if arg := d.Arg(argN); arg == nil {
+ // Default to Other.
+ } else if x, ok := arg.(number.VisibleDigits); ok {
+ d := x.Digits(nil, lang, scale)
+ form, n = cardinal.matchDisplayDigits(lang, &d)
+ } else if x, ok := arg.(Interface); ok {
+ // This covers lists and formatters from the number package.
+ form, n = x.PluralForm(lang, scale)
+ } else {
+ var f number.Formatter
+ switch kind {
+ case kindScale:
+ f.InitDecimal(lang)
+ f.SetScale(scale)
+ case kindScientific:
+ f.InitScientific(lang)
+ f.SetScale(scale)
+ case kindPrecision:
+ f.InitDecimal(lang)
+ f.SetPrecision(scale)
+ case kindDefault:
+ // sensible default
+ f.InitDecimal(lang)
+ if k := reflect.TypeOf(arg).Kind(); reflect.Int <= k && k <= reflect.Uintptr {
+ f.SetScale(0)
+ } else {
+ f.SetScale(2)
+ }
+ }
+ var dec number.Decimal // TODO: buffer in Printer
+ dec.Convert(f.RoundingContext, arg)
+ v := number.FormatDigits(&dec, f.RoundingContext)
+ if !v.NaN && !v.Inf {
+ form, n = cardinal.matchDisplayDigits(d.Language(), &v)
+ }
+ }
+ for !d.Done() {
+ f := d.DecodeUint()
+ if (f == '=' && n == int(d.DecodeUint())) ||
+ (f == '<' && 0 <= n && n < int(d.DecodeUint())) ||
+ form == Form(f) ||
+ Other == Form(f) {
+ return d.ExecuteMessage()
+ }
+ d.SkipMessage()
+ }
+ return false
+}
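
A usage sketch based on the Selectf documentation above: registering a pluralized English message in the default catalog and printing it. The message key and case strings are illustrative; Selectf's first argument selects which substitution argument (1-based) drives the plural form.

package main

import (
	"golang.org/x/text/feature/plural"
	"golang.org/x/text/language"
	"golang.org/x/text/message"
)

func main() {
	// Register a pluralized translation: the first substitution argument of
	// the format string selects the case.
	message.Set(language.English, "You have %d task(s) left.",
		plural.Selectf(1, "%d",
			"=0", "You are all done!",
			plural.One, "You have one task left.",
			plural.Other, "You have %d tasks left.",
		))

	p := message.NewPrinter(language.English)
	p.Printf("You have %d task(s) left.", 0) // You are all done!
	p.Printf("You have %d task(s) left.", 1) // You have one task left.
	p.Printf("You have %d task(s) left.", 5) // You have 5 tasks left.
}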
diff --git a/vendor/golang.org/x/text/feature/plural/plural.go b/vendor/golang.org/x/text/feature/plural/plural.go
new file mode 100644
index 0000000000..e9f2d42e04
--- /dev/null
+++ b/vendor/golang.org/x/text/feature/plural/plural.go
@@ -0,0 +1,262 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go gen_common.go
+
+// Package plural provides utilities for handling linguistic plurals in text.
+//
+// The definitions in this package are based on the plural rule handling defined
+// in CLDR. See
+// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules for
+// details.
+package plural
+
+import (
+ "golang.org/x/text/internal/language/compact"
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+)
+
+// Rules defines the plural rules for all languages for a certain plural type.
+//
+// This package is UNDER CONSTRUCTION and its API may change.
+type Rules struct {
+ rules []pluralCheck
+ index []byte
+ langToIndex []byte
+ inclusionMasks []uint64
+}
+
+var (
+ // Cardinal defines the plural rules for numbers indicating quantities.
+ Cardinal *Rules = cardinal
+
+ // Ordinal defines the plural rules for numbers indicating position
+ // (first, second, etc.).
+ Ordinal *Rules = ordinal
+
+ ordinal = &Rules{
+ ordinalRules,
+ ordinalIndex,
+ ordinalLangToIndex,
+ ordinalInclusionMasks[:],
+ }
+
+ cardinal = &Rules{
+ cardinalRules,
+ cardinalIndex,
+ cardinalLangToIndex,
+ cardinalInclusionMasks[:],
+ }
+)
+
+// getIntApprox converts the digits in slice digits[start:end] to an integer
+// according to the following rules:
+// - Let i be asInt(digits[start:end]), where out-of-range digits are assumed
+// to be zero.
+// - Result n is big if i / 10^nMod > 1.
+// - Otherwise the result is i % 10^nMod.
+//
+// For example, if digits is {1, 2, 3} and start:end is 0:5, then the result
+// for various values of nMod is:
+// - when nMod == 2, n == big
+// - when nMod == 3, n == big
+// - when nMod == 4, n == big
+// - when nMod == 5, n == 12300
+// - when nMod == 6, n == 12300
+// - when nMod == 7, n == 12300
+func getIntApprox(digits []byte, start, end, nMod, big int) (n int) {
+ // Leading 0 digits just result in 0.
+ p := start
+ if p < 0 {
+ p = 0
+ }
+ // Range only over the part for which we have digits.
+ mid := end
+ if mid >= len(digits) {
+ mid = len(digits)
+ }
+ // Check digits more significant than nMod.
+ if q := end - nMod; q > 0 {
+ if q > mid {
+ q = mid
+ }
+ for ; p < q; p++ {
+ if digits[p] != 0 {
+ return big
+ }
+ }
+ }
+ for ; p < mid; p++ {
+ n = 10*n + int(digits[p])
+ }
+ // Multiply for trailing zeros.
+ for ; p < end; p++ {
+ n *= 10
+ }
+ return n
+}
+
+// MatchDigits computes the plural form for the given language and the given
+// decimal floating point digits. The digits are stored in big-endian order and
+// are of value byte(0) - byte(9). The floating point position is indicated by
+// exp and the number of visible decimals is scale. All leading and trailing
+// zeros may be omitted from digits.
+//
+// The following table contains examples of possible arguments to represent
+// the given numbers.
+//
+// decimal digits exp scale
+// 123 []byte{1, 2, 3} 3 0
+// 123.4 []byte{1, 2, 3, 4} 3 1
+// 123.40 []byte{1, 2, 3, 4} 3 2
+// 100000 []byte{1} 6 0
+// 100000.00 []byte{1} 6 3
+func (p *Rules) MatchDigits(t language.Tag, digits []byte, exp, scale int) Form {
+ index := tagToID(t)
+
+ // Differentiate up to including mod 1000000 for the integer part.
+ n := getIntApprox(digits, 0, exp, 6, 1000000)
+
+ // Differentiate up to including mod 100 for the fractional part.
+ f := getIntApprox(digits, exp, exp+scale, 2, 100)
+
+ return matchPlural(p, index, n, f, scale)
+}
+
+func (p *Rules) matchDisplayDigits(t language.Tag, d *number.Digits) (Form, int) {
+ n := getIntApprox(d.Digits, 0, int(d.Exp), 6, 1000000)
+ return p.MatchDigits(t, d.Digits, int(d.Exp), d.NumFracDigits()), n
+}
+
+func validForms(p *Rules, t language.Tag) (forms []Form) {
+ offset := p.langToIndex[tagToID(t)]
+ rules := p.rules[p.index[offset]:p.index[offset+1]]
+
+ forms = append(forms, Other)
+ last := Other
+ for _, r := range rules {
+ if cat := Form(r.cat & formMask); cat != andNext && last != cat {
+ forms = append(forms, cat)
+ last = cat
+ }
+ }
+ return forms
+}
+
+func (p *Rules) matchComponents(t language.Tag, n, f, scale int) Form {
+ return matchPlural(p, tagToID(t), n, f, scale)
+}
+
+// MatchPlural returns the plural form for the given language and plural
+// operands (as defined in
+// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules):
+//
+// where
+// n absolute value of the source number (integer and decimals)
+// input
+// i integer digits of n.
+// v number of visible fraction digits in n, with trailing zeros.
+// w number of visible fraction digits in n, without trailing zeros.
+// f visible fractional digits in n, with trailing zeros (f = t * 10^(v-w))
+// t visible fractional digits in n, without trailing zeros.
+//
+// If any of the operand values is too large to fit in an int, it is okay to
+// pass the value modulo 10,000,000.
+func (p *Rules) MatchPlural(lang language.Tag, i, v, w, f, t int) Form {
+ return matchPlural(p, tagToID(lang), i, f, v)
+}
+
+func matchPlural(p *Rules, index compact.ID, n, f, v int) Form {
+ nMask := p.inclusionMasks[n%maxMod]
+ // Compute the fMask inline in the rules below, as it is relatively rare.
+ // fMask := p.inclusionMasks[f%maxMod]
+ vMask := p.inclusionMasks[v%maxMod]
+
+ // Do the matching
+ offset := p.langToIndex[index]
+ rules := p.rules[p.index[offset]:p.index[offset+1]]
+ for i := 0; i < len(rules); i++ {
+ rule := rules[i]
+ setBit := uint64(1 << rule.setID)
+ var skip bool
+ switch op := opID(rule.cat >> opShift); op {
+ case opI: // i = x
+ skip = n >= numN || nMask&setBit == 0
+
+ case opI | opNotEqual: // i != x
+ skip = n < numN && nMask&setBit != 0
+
+ case opI | opMod: // i % m = x
+ skip = nMask&setBit == 0
+
+ case opI | opMod | opNotEqual: // i % m != x
+ skip = nMask&setBit != 0
+
+ case opN: // n = x
+ skip = f != 0 || n >= numN || nMask&setBit == 0
+
+ case opN | opNotEqual: // n != x
+ skip = f == 0 && n < numN && nMask&setBit != 0
+
+ case opN | opMod: // n % m = x
+ skip = f != 0 || nMask&setBit == 0
+
+ case opN | opMod | opNotEqual: // n % m != x
+ skip = f == 0 && nMask&setBit != 0
+
+ case opF: // f = x
+ skip = f >= numN || p.inclusionMasks[f%maxMod]&setBit == 0
+
+ case opF | opNotEqual: // f != x
+ skip = f < numN && p.inclusionMasks[f%maxMod]&setBit != 0
+
+ case opF | opMod: // f % m = x
+ skip = p.inclusionMasks[f%maxMod]&setBit == 0
+
+ case opF | opMod | opNotEqual: // f % m != x
+ skip = p.inclusionMasks[f%maxMod]&setBit != 0
+
+ case opV: // v = x
+ skip = v < numN && vMask&setBit == 0
+
+ case opV | opNotEqual: // v != x
+ skip = v < numN && vMask&setBit != 0
+
+ case opW: // w == 0
+ skip = f != 0
+
+ case opW | opNotEqual: // w != 0
+ skip = f == 0
+
+ // Hard-wired rules that cannot be handled by our algorithm.
+
+ case opBretonM:
+ skip = f != 0 || n == 0 || n%1000000 != 0
+
+ case opAzerbaijan00s:
+ // 100,200,300,400,500,600,700,800,900
+ skip = n == 0 || n >= 1000 || n%100 != 0
+
+ case opItalian800:
+ skip = (f != 0 || n >= numN || nMask&setBit == 0) && n != 800
+ }
+ if skip {
+ // advance over AND entries.
+ for ; i < len(rules) && rules[i].cat&formMask == andNext; i++ {
+ }
+ continue
+ }
+ // return if we have a final entry.
+ if cat := rule.cat & formMask; cat != andNext {
+ return Form(cat)
+ }
+ }
+ return Other
+}
+
+func tagToID(t language.Tag) compact.ID {
+ id, _ := compact.RegionalID(compact.Tag(t))
+ return id
+}
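
A small sketch exercising the matching entry points defined above, using the digit encoding described in the MatchDigits table (123.40 is digits {1, 2, 3, 4}, exp 3, scale 2). English cardinal rules select One only for i == 1 with no visible fraction digits.

package main

import (
	"fmt"

	"golang.org/x/text/feature/plural"
	"golang.org/x/text/language"
)

func main() {
	en := language.English

	// MatchPlural takes the CLDR operands (i, v, w, f, t).
	fmt.Println(plural.Cardinal.MatchPlural(en, 1, 0, 0, 0, 0) == plural.One)   // true
	fmt.Println(plural.Cardinal.MatchPlural(en, 2, 0, 0, 0, 0) == plural.Other) // true

	// 123.40 -> digits {1, 2, 3, 4}, exp 3, scale 2; a visible fraction means "other".
	fmt.Println(plural.Cardinal.MatchDigits(en, []byte{1, 2, 3, 4}, 3, 2) == plural.Other) // true
}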
diff --git a/vendor/golang.org/x/text/feature/plural/tables.go b/vendor/golang.org/x/text/feature/plural/tables.go
new file mode 100644
index 0000000000..b06b9cb4ea
--- /dev/null
+++ b/vendor/golang.org/x/text/feature/plural/tables.go
@@ -0,0 +1,552 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package plural
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32"
+
+var ordinalRules = []pluralCheck{ // 64 elements
+ 0: {cat: 0x2f, setID: 0x4},
+ 1: {cat: 0x3a, setID: 0x5},
+ 2: {cat: 0x22, setID: 0x1},
+ 3: {cat: 0x22, setID: 0x6},
+ 4: {cat: 0x22, setID: 0x7},
+ 5: {cat: 0x2f, setID: 0x8},
+ 6: {cat: 0x3c, setID: 0x9},
+ 7: {cat: 0x2f, setID: 0xa},
+ 8: {cat: 0x3c, setID: 0xb},
+ 9: {cat: 0x2c, setID: 0xc},
+ 10: {cat: 0x24, setID: 0xd},
+ 11: {cat: 0x2d, setID: 0xe},
+ 12: {cat: 0x2d, setID: 0xf},
+ 13: {cat: 0x2f, setID: 0x10},
+ 14: {cat: 0x35, setID: 0x3},
+ 15: {cat: 0xc5, setID: 0x11},
+ 16: {cat: 0x2, setID: 0x1},
+ 17: {cat: 0x5, setID: 0x3},
+ 18: {cat: 0xd, setID: 0x12},
+ 19: {cat: 0x22, setID: 0x1},
+ 20: {cat: 0x2f, setID: 0x13},
+ 21: {cat: 0x3d, setID: 0x14},
+ 22: {cat: 0x2f, setID: 0x15},
+ 23: {cat: 0x3a, setID: 0x16},
+ 24: {cat: 0x2f, setID: 0x17},
+ 25: {cat: 0x3b, setID: 0x18},
+ 26: {cat: 0x2f, setID: 0xa},
+ 27: {cat: 0x3c, setID: 0xb},
+ 28: {cat: 0x22, setID: 0x1},
+ 29: {cat: 0x23, setID: 0x19},
+ 30: {cat: 0x24, setID: 0x1a},
+ 31: {cat: 0x22, setID: 0x1b},
+ 32: {cat: 0x23, setID: 0x2},
+ 33: {cat: 0x24, setID: 0x1a},
+ 34: {cat: 0xf, setID: 0x15},
+ 35: {cat: 0x1a, setID: 0x16},
+ 36: {cat: 0xf, setID: 0x17},
+ 37: {cat: 0x1b, setID: 0x18},
+ 38: {cat: 0xf, setID: 0x1c},
+ 39: {cat: 0x1d, setID: 0x1d},
+ 40: {cat: 0xa, setID: 0x1e},
+ 41: {cat: 0xa, setID: 0x1f},
+ 42: {cat: 0xc, setID: 0x20},
+ 43: {cat: 0xe4, setID: 0x0},
+ 44: {cat: 0x5, setID: 0x3},
+ 45: {cat: 0xd, setID: 0xe},
+ 46: {cat: 0xd, setID: 0x21},
+ 47: {cat: 0x22, setID: 0x1},
+ 48: {cat: 0x23, setID: 0x19},
+ 49: {cat: 0x24, setID: 0x1a},
+ 50: {cat: 0x25, setID: 0x22},
+ 51: {cat: 0x22, setID: 0x23},
+ 52: {cat: 0x23, setID: 0x19},
+ 53: {cat: 0x24, setID: 0x1a},
+ 54: {cat: 0x25, setID: 0x22},
+ 55: {cat: 0x22, setID: 0x24},
+ 56: {cat: 0x23, setID: 0x19},
+ 57: {cat: 0x24, setID: 0x1a},
+ 58: {cat: 0x25, setID: 0x22},
+ 59: {cat: 0x21, setID: 0x25},
+ 60: {cat: 0x22, setID: 0x1},
+ 61: {cat: 0x23, setID: 0x2},
+ 62: {cat: 0x24, setID: 0x26},
+ 63: {cat: 0x25, setID: 0x27},
+} // Size: 152 bytes
+
+var ordinalIndex = []uint8{ // 22 elements
+ 0x00, 0x00, 0x02, 0x03, 0x04, 0x05, 0x07, 0x09,
+ 0x0b, 0x0f, 0x10, 0x13, 0x16, 0x1c, 0x1f, 0x22,
+ 0x28, 0x2f, 0x33, 0x37, 0x3b, 0x40,
+} // Size: 46 bytes
+
+var ordinalLangToIndex = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,
+ 0x10, 0x10, 0x10, 0x00, 0x00, 0x05, 0x05, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 40 - 7F
+ 0x12, 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e,
+ 0x0e, 0x0e, 0x0e, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x14, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 80 - BF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ // Entry C0 - FF
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 100 - 13F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02,
+ 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 140 - 17F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
+ 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 180 - 1BF
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x09, 0x09, 0x09,
+ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 1C0 - 1FF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x0d, 0x0d, 0x02, 0x02, 0x02,
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 200 - 23F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x13, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 240 - 27F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 280 - 2BF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x0b, 0x0b, 0x0b, 0x0b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x07, 0x07, 0x02, 0x00, 0x00, 0x00, 0x00,
+ // Entry 2C0 - 2FF
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 300 - 33F
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x0c,
+} // Size: 799 bytes
+
+var ordinalInclusionMasks = []uint64{ // 100 elements
+ // Entry 0 - 1F
+ 0x0000002000010009, 0x00000018482000d3, 0x0000000042840195, 0x000000410a040581,
+ 0x00000041040c0081, 0x0000009840040041, 0x0000008400045001, 0x0000003850040001,
+ 0x0000003850060001, 0x0000003800049001, 0x0000000800052001, 0x0000000040660031,
+ 0x0000000041840331, 0x0000000100040f01, 0x00000001001c0001, 0x0000000040040001,
+ 0x0000000000045001, 0x0000000070040001, 0x0000000070040001, 0x0000000000049001,
+ 0x0000000080050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501,
+ 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001,
+ 0x0000000050000001, 0x0000000000009001, 0x0000000000010001, 0x0000000040200011,
+ // Entry 20 - 3F
+ 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001,
+ 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001,
+ 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501,
+ 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001,
+ 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011,
+ 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001,
+ 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001,
+ 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501,
+ // Entry 40 - 5F
+ 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001,
+ 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011,
+ 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001,
+ 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001,
+ 0x0000000080070001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501,
+ 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001,
+ 0x0000000050000001, 0x0000000000009001, 0x0000000200010001, 0x0000000040200011,
+ 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001,
+ // Entry 60 - 7F
+ 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001,
+} // Size: 824 bytes
+
+// Slots used for ordinal: 40 of 0xFF rules; 16 of 0xFF indexes; 40 of 64 sets
+
+var cardinalRules = []pluralCheck{ // 166 elements
+ 0: {cat: 0x2, setID: 0x3},
+ 1: {cat: 0x22, setID: 0x1},
+ 2: {cat: 0x2, setID: 0x4},
+ 3: {cat: 0x2, setID: 0x4},
+ 4: {cat: 0x7, setID: 0x1},
+ 5: {cat: 0x62, setID: 0x3},
+ 6: {cat: 0x22, setID: 0x4},
+ 7: {cat: 0x7, setID: 0x3},
+ 8: {cat: 0x42, setID: 0x1},
+ 9: {cat: 0x22, setID: 0x4},
+ 10: {cat: 0x22, setID: 0x4},
+ 11: {cat: 0x22, setID: 0x5},
+ 12: {cat: 0x22, setID: 0x1},
+ 13: {cat: 0x22, setID: 0x1},
+ 14: {cat: 0x7, setID: 0x4},
+ 15: {cat: 0x92, setID: 0x3},
+ 16: {cat: 0xf, setID: 0x6},
+ 17: {cat: 0x1f, setID: 0x7},
+ 18: {cat: 0x82, setID: 0x3},
+ 19: {cat: 0x92, setID: 0x3},
+ 20: {cat: 0xf, setID: 0x6},
+ 21: {cat: 0x62, setID: 0x3},
+ 22: {cat: 0x4a, setID: 0x6},
+ 23: {cat: 0x7, setID: 0x8},
+ 24: {cat: 0x62, setID: 0x3},
+ 25: {cat: 0x1f, setID: 0x9},
+ 26: {cat: 0x62, setID: 0x3},
+ 27: {cat: 0x5f, setID: 0x9},
+ 28: {cat: 0x72, setID: 0x3},
+ 29: {cat: 0x29, setID: 0xa},
+ 30: {cat: 0x29, setID: 0xb},
+ 31: {cat: 0x4f, setID: 0xb},
+ 32: {cat: 0x61, setID: 0x2},
+ 33: {cat: 0x2f, setID: 0x6},
+ 34: {cat: 0x3a, setID: 0x7},
+ 35: {cat: 0x4f, setID: 0x6},
+ 36: {cat: 0x5f, setID: 0x7},
+ 37: {cat: 0x62, setID: 0x2},
+ 38: {cat: 0x4f, setID: 0x6},
+ 39: {cat: 0x72, setID: 0x2},
+ 40: {cat: 0x21, setID: 0x3},
+ 41: {cat: 0x7, setID: 0x4},
+ 42: {cat: 0x32, setID: 0x3},
+ 43: {cat: 0x21, setID: 0x3},
+ 44: {cat: 0x22, setID: 0x1},
+ 45: {cat: 0x22, setID: 0x1},
+ 46: {cat: 0x23, setID: 0x2},
+ 47: {cat: 0x2, setID: 0x3},
+ 48: {cat: 0x22, setID: 0x1},
+ 49: {cat: 0x24, setID: 0xc},
+ 50: {cat: 0x7, setID: 0x1},
+ 51: {cat: 0x62, setID: 0x3},
+ 52: {cat: 0x74, setID: 0x3},
+ 53: {cat: 0x24, setID: 0x3},
+ 54: {cat: 0x2f, setID: 0xd},
+ 55: {cat: 0x34, setID: 0x1},
+ 56: {cat: 0xf, setID: 0x6},
+ 57: {cat: 0x1f, setID: 0x7},
+ 58: {cat: 0x62, setID: 0x3},
+ 59: {cat: 0x4f, setID: 0x6},
+ 60: {cat: 0x5a, setID: 0x7},
+ 61: {cat: 0xf, setID: 0xe},
+ 62: {cat: 0x1f, setID: 0xf},
+ 63: {cat: 0x64, setID: 0x3},
+ 64: {cat: 0x4f, setID: 0xe},
+ 65: {cat: 0x5c, setID: 0xf},
+ 66: {cat: 0x22, setID: 0x10},
+ 67: {cat: 0x23, setID: 0x11},
+ 68: {cat: 0x24, setID: 0x12},
+ 69: {cat: 0xf, setID: 0x1},
+ 70: {cat: 0x62, setID: 0x3},
+ 71: {cat: 0xf, setID: 0x2},
+ 72: {cat: 0x63, setID: 0x3},
+ 73: {cat: 0xf, setID: 0x13},
+ 74: {cat: 0x64, setID: 0x3},
+ 75: {cat: 0x74, setID: 0x3},
+ 76: {cat: 0xf, setID: 0x1},
+ 77: {cat: 0x62, setID: 0x3},
+ 78: {cat: 0x4a, setID: 0x1},
+ 79: {cat: 0xf, setID: 0x2},
+ 80: {cat: 0x63, setID: 0x3},
+ 81: {cat: 0x4b, setID: 0x2},
+ 82: {cat: 0xf, setID: 0x13},
+ 83: {cat: 0x64, setID: 0x3},
+ 84: {cat: 0x4c, setID: 0x13},
+ 85: {cat: 0x7, setID: 0x1},
+ 86: {cat: 0x62, setID: 0x3},
+ 87: {cat: 0x7, setID: 0x2},
+ 88: {cat: 0x63, setID: 0x3},
+ 89: {cat: 0x2f, setID: 0xa},
+ 90: {cat: 0x37, setID: 0x14},
+ 91: {cat: 0x65, setID: 0x3},
+ 92: {cat: 0x7, setID: 0x1},
+ 93: {cat: 0x62, setID: 0x3},
+ 94: {cat: 0x7, setID: 0x15},
+ 95: {cat: 0x64, setID: 0x3},
+ 96: {cat: 0x75, setID: 0x3},
+ 97: {cat: 0x7, setID: 0x1},
+ 98: {cat: 0x62, setID: 0x3},
+ 99: {cat: 0xf, setID: 0xe},
+ 100: {cat: 0x1f, setID: 0xf},
+ 101: {cat: 0x64, setID: 0x3},
+ 102: {cat: 0xf, setID: 0x16},
+ 103: {cat: 0x17, setID: 0x1},
+ 104: {cat: 0x65, setID: 0x3},
+ 105: {cat: 0xf, setID: 0x17},
+ 106: {cat: 0x65, setID: 0x3},
+ 107: {cat: 0xf, setID: 0xf},
+ 108: {cat: 0x65, setID: 0x3},
+ 109: {cat: 0x2f, setID: 0x6},
+ 110: {cat: 0x3a, setID: 0x7},
+ 111: {cat: 0x2f, setID: 0xe},
+ 112: {cat: 0x3c, setID: 0xf},
+ 113: {cat: 0x2d, setID: 0xa},
+ 114: {cat: 0x2d, setID: 0x17},
+ 115: {cat: 0x2d, setID: 0x18},
+ 116: {cat: 0x2f, setID: 0x6},
+ 117: {cat: 0x3a, setID: 0xb},
+ 118: {cat: 0x2f, setID: 0x19},
+ 119: {cat: 0x3c, setID: 0xb},
+ 120: {cat: 0x55, setID: 0x3},
+ 121: {cat: 0x22, setID: 0x1},
+ 122: {cat: 0x24, setID: 0x3},
+ 123: {cat: 0x2c, setID: 0xc},
+ 124: {cat: 0x2d, setID: 0xb},
+ 125: {cat: 0xf, setID: 0x6},
+ 126: {cat: 0x1f, setID: 0x7},
+ 127: {cat: 0x62, setID: 0x3},
+ 128: {cat: 0xf, setID: 0xe},
+ 129: {cat: 0x1f, setID: 0xf},
+ 130: {cat: 0x64, setID: 0x3},
+ 131: {cat: 0xf, setID: 0xa},
+ 132: {cat: 0x65, setID: 0x3},
+ 133: {cat: 0xf, setID: 0x17},
+ 134: {cat: 0x65, setID: 0x3},
+ 135: {cat: 0xf, setID: 0x18},
+ 136: {cat: 0x65, setID: 0x3},
+ 137: {cat: 0x2f, setID: 0x6},
+ 138: {cat: 0x3a, setID: 0x1a},
+ 139: {cat: 0x2f, setID: 0x1b},
+ 140: {cat: 0x3b, setID: 0x1c},
+ 141: {cat: 0x2f, setID: 0x1d},
+ 142: {cat: 0x3c, setID: 0x1e},
+ 143: {cat: 0x37, setID: 0x3},
+ 144: {cat: 0xa5, setID: 0x0},
+ 145: {cat: 0x22, setID: 0x1},
+ 146: {cat: 0x23, setID: 0x2},
+ 147: {cat: 0x24, setID: 0x1f},
+ 148: {cat: 0x25, setID: 0x20},
+ 149: {cat: 0xf, setID: 0x6},
+ 150: {cat: 0x62, setID: 0x3},
+ 151: {cat: 0xf, setID: 0x1b},
+ 152: {cat: 0x63, setID: 0x3},
+ 153: {cat: 0xf, setID: 0x21},
+ 154: {cat: 0x64, setID: 0x3},
+ 155: {cat: 0x75, setID: 0x3},
+ 156: {cat: 0x21, setID: 0x3},
+ 157: {cat: 0x22, setID: 0x1},
+ 158: {cat: 0x23, setID: 0x2},
+ 159: {cat: 0x2c, setID: 0x22},
+ 160: {cat: 0x2d, setID: 0x5},
+ 161: {cat: 0x21, setID: 0x3},
+ 162: {cat: 0x22, setID: 0x1},
+ 163: {cat: 0x23, setID: 0x2},
+ 164: {cat: 0x24, setID: 0x23},
+ 165: {cat: 0x25, setID: 0x24},
+} // Size: 356 bytes
+
+var cardinalIndex = []uint8{ // 36 elements
+ 0x00, 0x00, 0x02, 0x03, 0x04, 0x06, 0x09, 0x0a,
+ 0x0c, 0x0d, 0x10, 0x14, 0x17, 0x1d, 0x28, 0x2b,
+ 0x2d, 0x2f, 0x32, 0x38, 0x42, 0x45, 0x4c, 0x55,
+ 0x5c, 0x61, 0x6d, 0x74, 0x79, 0x7d, 0x89, 0x91,
+ 0x95, 0x9c, 0xa1, 0xa6,
+} // Size: 60 bytes
+
+var cardinalLangToIndex = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x06, 0x06,
+ 0x01, 0x01, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21,
+ 0x01, 0x01, 0x08, 0x08, 0x04, 0x04, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x00, 0x00, 0x1a, 0x1a, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x00, 0x00,
+ // Entry 40 - 7F
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x1e, 0x1e,
+ 0x08, 0x08, 0x13, 0x13, 0x13, 0x13, 0x13, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x18, 0x18, 0x00, 0x00, 0x22, 0x22, 0x09, 0x09,
+ 0x09, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x00, 0x00, 0x16, 0x16, 0x00,
+ 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 80 - BF
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry C0 - FF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ // Entry 100 - 13F
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04,
+ 0x08, 0x08, 0x00, 0x00, 0x01, 0x01, 0x01, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x0c, 0x0c,
+ 0x08, 0x08, 0x08, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 140 - 17F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x08, 0x08, 0x04, 0x04, 0x1f, 0x1f,
+ 0x14, 0x14, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08,
+ 0x01, 0x01, 0x06, 0x00, 0x00, 0x20, 0x20, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x17, 0x17, 0x01,
+ 0x01, 0x13, 0x13, 0x13, 0x16, 0x16, 0x08, 0x08,
+ 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 180 - 1BF
+ 0x00, 0x04, 0x0a, 0x0a, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x10, 0x17, 0x00, 0x00, 0x00, 0x08, 0x08,
+ 0x04, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x02,
+ 0x02, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08,
+ 0x08, 0x08, 0x00, 0x00, 0x0f, 0x0f, 0x08, 0x10,
+ // Entry 1C0 - 1FF
+ 0x10, 0x08, 0x08, 0x0e, 0x0e, 0x08, 0x08, 0x08,
+ 0x08, 0x00, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x1b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x0d, 0x08,
+ 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06,
+ 0x00, 0x00, 0x08, 0x08, 0x0b, 0x0b, 0x08, 0x08,
+ 0x08, 0x08, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 200 - 23F
+ 0x00, 0x08, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00,
+ 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x08,
+ 0x06, 0x00, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x06,
+ 0x06, 0x06, 0x06, 0x08, 0x19, 0x19, 0x0d, 0x0d,
+ 0x08, 0x08, 0x03, 0x04, 0x03, 0x04, 0x04, 0x04,
+ // Entry 240 - 27F
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x12,
+ 0x12, 0x12, 0x08, 0x08, 0x1d, 0x1d, 0x1d, 0x1d,
+ 0x1d, 0x1d, 0x1d, 0x00, 0x00, 0x08, 0x08, 0x00,
+ 0x00, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x08,
+ 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x13, 0x11, 0x11, 0x11, 0x11, 0x11,
+ 0x05, 0x05, 0x18, 0x18, 0x15, 0x15, 0x10, 0x10,
+ // Entry 280 - 2BF
+ 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x06,
+ 0x08, 0x08, 0x08, 0x0c, 0x08, 0x00, 0x00, 0x08,
+ // Entry 2C0 - 2FF
+ 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x07,
+ 0x07, 0x08, 0x08, 0x1d, 0x1d, 0x04, 0x04, 0x04,
+ 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08,
+ 0x08, 0x08, 0x08, 0x06, 0x08, 0x08, 0x00, 0x00,
+ 0x08, 0x08, 0x08, 0x00, 0x00, 0x04, 0x04, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // Entry 300 - 33F
+ 0x00, 0x00, 0x00, 0x01, 0x01, 0x04, 0x04,
+} // Size: 799 bytes
+
+var cardinalInclusionMasks = []uint64{ // 100 elements
+ // Entry 0 - 1F
+ 0x0000000200500419, 0x0000000000512153, 0x000000000a327105, 0x0000000ca23c7101,
+ 0x00000004a23c7201, 0x0000000482943001, 0x0000001482943201, 0x0000000502943001,
+ 0x0000000502943001, 0x0000000522943201, 0x0000000540543401, 0x00000000454128e1,
+ 0x000000005b02e821, 0x000000006304e821, 0x000000006304ea21, 0x0000000042842821,
+ 0x0000000042842a21, 0x0000000042842821, 0x0000000042842821, 0x0000000062842a21,
+ 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021,
+ 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021,
+ 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061,
+ // Entry 20 - 3F
+ 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021,
+ 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221,
+ 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021,
+ 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021,
+ 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061,
+ 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021,
+ 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221,
+ 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021,
+ // Entry 40 - 5F
+ 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021,
+ 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061,
+ 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021,
+ 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221,
+ 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021,
+ 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021,
+ 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061,
+ 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021,
+ // Entry 60 - 7F
+ 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221,
+} // Size: 824 bytes
+
+// Slots used for cardinal: A6 of 0xFF rules; 24 of 0xFF indexes; 37 of 64 sets
+
+// Total table size 3860 bytes (3KiB); checksum: AAFBF21
diff --git a/vendor/golang.org/x/text/internal/catmsg/catmsg.go b/vendor/golang.org/x/text/internal/catmsg/catmsg.go
new file mode 100644
index 0000000000..1b257a7b4d
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/catmsg/catmsg.go
@@ -0,0 +1,417 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package catmsg contains support types for package x/text/message/catalog.
+//
+// This package contains the low-level implementations of Message used by the
+// catalog package and provides primitives for other packages to implement their
+// own. For instance, the plural package provides functionality for selecting
+// translation strings based on the plural category of substitution arguments.
+//
+// # Encoding and Decoding
+//
+// Catalogs store Messages encoded as a single string. Compiling a message into
+// a string both results in a more compact representation and speeds up evaluation.
+//
+// A Message must implement a Compile method to convert its arbitrary
+// representation to a string. The Compile method takes an Encoder which
+// facilitates serializing the message. Encoders also provide more context of
+// the message's creation (such as for which language the message is intended),
+// which may not be known at the time of the creation of the message.
+//
+// Each message type must also have an accompanying decoder registered to decode
+// the message. This decoder takes a Decoder argument which provides the
+// counterparts for the decoding.
+//
+// # Renderers
+//
+// A Decoder must be initialized with a Renderer implementation. These
+// implementations must be provided by packages that use Catalogs, typically
+// formatting packages such as x/text/message. A typical user will not need to
+// worry about this type; it is only relevant to packages that do string
+// formatting and want to use the catalog package to handle localized strings.
+//
+// A package that uses catalogs for selecting strings receives selection results
+// as a sequence of substrings passed to the Renderer. The following snippet shows
+// how to express the above example using the message package.
+//
+// message.Set(language.English, "You are %d minute(s) late.",
+// catalog.Var("minutes", plural.Select(1, "one", "minute")),
+// catalog.String("You are %[1]d ${minutes} late."))
+//
+// p := message.NewPrinter(language.English)
+// p.Printf("You are %d minute(s) late.", 5) // always 5 minutes late.
+//
+// To evaluate the Printf, package message wraps the arguments in a Renderer
+// that is passed to the catalog for message decoding. The call sequence that
+// results from evaluating the above message, assuming the person is rather
+// tardy, is:
+//
+// Render("You are %[1]d ")
+// Arg(1)
+// Render("minutes")
+// Render(" late.")
+//
+// The call to Arg is caused by the plural.Select execution, which evaluates
+// the argument to determine whether the singular or plural message form should
+// be selected. The calls to Render report the partial results to the message
+// package for further evaluation.
+package catmsg
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/text/language"
+)
+
+// A Handle refers to a registered message type.
+type Handle int
+
+// A Handler decodes and evaluates data compiled by a Message and sends the
+// result to the Decoder. The output may depend on the value of the substitution
+// arguments, accessible by the Decoder's Arg method. The Handler returns false
+// if there is no translation for the given substitution arguments.
+type Handler func(d *Decoder) bool
+
+// Register records the existence of a message type and returns a Handle that
+// can be used in the Encoder's EncodeMessageType method to create such
+// messages. The prefix of the name should be the package path followed by
+// an optional disambiguating string.
+// Register will panic if a handle for the same name was already registered.
+func Register(name string, handler Handler) Handle {
+ mutex.Lock()
+ defer mutex.Unlock()
+
+ if _, ok := names[name]; ok {
+ panic(fmt.Errorf("catmsg: handler for %q already exists", name))
+ }
+ h := Handle(len(handlers))
+ names[name] = h
+ handlers = append(handlers, handler)
+ return h
+}
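+
+// An illustrative registration sketch (the package path "example.com/mypkg" is
+// hypothetical): a custom message type would typically register its handler
+// once and keep the returned Handle for use in its Compile method:
+//
+//	var exampleHandle = Register("example.com/mypkg.Message", func(d *Decoder) bool {
+//		d.Render(d.DecodeString())
+//		return true
+//	})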
+
+// These handlers require fixed positions in the handlers slice.
+const (
+ msgVars Handle = iota
+ msgFirst
+ msgRaw
+ msgString
+ msgAffix
+ // Leave some arbitrary room for future expansion: 20 should suffice.
+ numInternal = 20
+)
+
+const prefix = "golang.org/x/text/internal/catmsg."
+
+var (
+ // TODO: find a more stable way to link handles to message types.
+ mutex sync.Mutex
+ names = map[string]Handle{
+ prefix + "Vars": msgVars,
+ prefix + "First": msgFirst,
+ prefix + "Raw": msgRaw,
+ prefix + "String": msgString,
+ prefix + "Affix": msgAffix,
+ }
+ handlers = make([]Handler, numInternal)
+)
+
+func init() {
+ // This handler is a message type wrapper that initializes a decoder
+ // with a variable block. This message type, if present, is always at the
+ // start of an encoded message.
+ handlers[msgVars] = func(d *Decoder) bool {
+ blockSize := int(d.DecodeUint())
+ d.vars = d.data[:blockSize]
+ d.data = d.data[blockSize:]
+ return d.executeMessage()
+ }
+
+ // First takes the first message in a sequence that results in a match for
+ // the given substitution arguments.
+ handlers[msgFirst] = func(d *Decoder) bool {
+ for !d.Done() {
+ if d.ExecuteMessage() {
+ return true
+ }
+ }
+ return false
+ }
+
+ handlers[msgRaw] = func(d *Decoder) bool {
+ d.Render(d.data)
+ return true
+ }
+
+ // A String message alternates between a string constant and a variable
+ // substitution.
+ handlers[msgString] = func(d *Decoder) bool {
+ for !d.Done() {
+ if str := d.DecodeString(); str != "" {
+ d.Render(str)
+ }
+ if d.Done() {
+ break
+ }
+ d.ExecuteSubstitution()
+ }
+ return true
+ }
+
+ handlers[msgAffix] = func(d *Decoder) bool {
+ // TODO: use an alternative method for common cases.
+ prefix := d.DecodeString()
+ suffix := d.DecodeString()
+ if prefix != "" {
+ d.Render(prefix)
+ }
+ ret := d.ExecuteMessage()
+ if suffix != "" {
+ d.Render(suffix)
+ }
+ return ret
+ }
+}
+
+var (
+ // ErrIncomplete indicates a compiled message does not define translations
+ // for all possible argument values. If this message is returned, evaluating
+ // a message may result in the ErrNoMatch error.
+ ErrIncomplete = errors.New("catmsg: incomplete message; may not give result for all inputs")
+
+ // ErrNoMatch indicates no translation message matched the given input
+ // parameters when evaluating a message.
+ ErrNoMatch = errors.New("catmsg: no translation for inputs")
+)
+
+// A Message holds a collection of translations for the same phrase that may
+// vary based on the values of substitution arguments.
+type Message interface {
+ // Compile encodes the format string(s) of the message as a string for later
+ // evaluation.
+ //
+ // The first call Compile makes on the encoder must be EncodeMessageType.
+ // The handle passed to this call may either be a handle returned by
+ // Register to encode a single custom message, or HandleFirst followed by
+ // a sequence of calls to EncodeMessage.
+ //
+ // Compile must return ErrIncomplete if it is possible for evaluation to
+ // not match any translation for a given set of formatting parameters.
+ // For example, selecting a translation based on plural form may not yield
+ // a match if the form "Other" is not one of the selectors.
+ //
+ // Compile may return any other application-specific error. For backwards
+	// compatibility with packages like fmt, which often do not do sanity
+ // checking of format strings ahead of time, Compile should still make an
+ // effort to have some sensible fallback in case of an error.
+ Compile(e *Encoder) error
+}
+
+// Compile converts a Message to a data string that can be stored in a Catalog.
+// The resulting string can subsequently be decoded by passing to the Execute
+// method of a Decoder.
+func Compile(tag language.Tag, macros Dictionary, m Message) (data string, err error) {
+ // TODO: pass macros so they can be used for validation.
+ v := &Encoder{inBody: true} // encoder for variables
+ v.root = v
+ e := &Encoder{root: v, parent: v, tag: tag} // encoder for messages
+ err = m.Compile(e)
+	// This package serves the message package, which in turn is meant to be a
+ // drop-in replacement for fmt. With the fmt package, format strings are
+ // evaluated lazily and errors are handled by substituting strings in the
+	// result, rather than returning an error. Dealing with multiple languages
+ // makes it more important to check errors ahead of time. We chose to be
+ // consistent and compatible and allow graceful degradation in case of
+ // errors.
+ buf := e.buf[stripPrefix(e.buf):]
+ if len(v.buf) > 0 {
+ // Prepend variable block.
+ b := make([]byte, 1+maxVarintBytes+len(v.buf)+len(buf))
+ b[0] = byte(msgVars)
+ b = b[:1+encodeUint(b[1:], uint64(len(v.buf)))]
+ b = append(b, v.buf...)
+ b = append(b, buf...)
+ buf = b
+ }
+ if err == nil {
+ err = v.err
+ }
+ return string(buf), err
+}
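+
+// A minimal illustrative call, assuming no macros are needed (the macros
+// argument is not yet used for validation):
+//
+//	data, err := Compile(language.English, nil, String("Hello, world!"))
+//	// data holds the encoded message and can be stored in a catalog for
+//	// later evaluation by a Decoder; err is nil for this simple message.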
+
+// FirstOf is a message type that prints the first message in the sequence that
+// resolves to a match for the given substitution arguments.
+type FirstOf []Message
+
+// Compile implements Message.
+func (s FirstOf) Compile(e *Encoder) error {
+ e.EncodeMessageType(msgFirst)
+ err := ErrIncomplete
+ for i, m := range s {
+ if err == nil {
+ return fmt.Errorf("catalog: message argument %d is complete and blocks subsequent messages", i-1)
+ }
+ err = e.EncodeMessage(m)
+ }
+ return err
+}
+
+// Var defines a message that can be substituted for a placeholder of the same
+// name. If an expression does not result in a string after evaluation, Name is
+// used as the substitution. For example:
+//
+// Var{
+// Name: "minutes",
+// Message: plural.Select(1, "one", "minute"),
+// }
+//
+// will resolve to minute for singular and minutes for plural forms.
+type Var struct {
+ Name string
+ Message Message
+}
+
+var errIsVar = errors.New("catmsg: variable used as message")
+
+// Compile implements Message.
+//
+// Note that this method merely registers a variable; it does not create an
+// encoded message.
+func (v *Var) Compile(e *Encoder) error {
+ if err := e.addVar(v.Name, v.Message); err != nil {
+ return err
+ }
+ // Using a Var by itself is an error. If it is in a sequence followed by
+ // other messages referring to it, this error will be ignored.
+ return errIsVar
+}
+
+// Raw is a message consisting of a single format string that is passed as is
+// to the Renderer.
+//
+// Note that a Renderer may still do its own variable substitution.
+type Raw string
+
+// Compile implements Message.
+func (r Raw) Compile(e *Encoder) (err error) {
+ e.EncodeMessageType(msgRaw)
+ // Special case: raw strings don't have a size encoding and so don't use
+ // EncodeString.
+ e.buf = append(e.buf, r...)
+ return nil
+}
+
+// String is a message consisting of a single format string which contains
+// placeholders that may be substituted with variables.
+//
+// Variable substitutions are marked with placeholders and a variable name of
+// the form ${name}. Any other substitutions such as Go templates or
+// printf-style substitutions are left to be done by the Renderer.
+//
+// When evaluating a string interpolation, a Renderer will receive separate
+// calls for each placeholder and interstitial string. For example, for the
+// message: "%[1]v ${invites} %[2]v to ${their} party." The sequence of calls
+// is:
+//
+// d.Render("%[1]v ")
+// d.Arg(1)
+// d.Render(resultOfInvites)
+// d.Render(" %[2]v to ")
+// d.Arg(2)
+// d.Render(resultOfTheir)
+// d.Render(" party.")
+//
+// where the messages for "invites" and "their" both use a plural.Select
+// referring to the first argument.
+//
+// Strings may also invoke macros. Macros are essentially variables that can be
+// reused. Macros may, for instance, be used to make selections between
+// different conjugations of a verb. See the catalog package description for an
+// overview of macros.
+type String string
+
+// Compile implements Message. It parses the placeholder formats and returns
+// any error.
+func (s String) Compile(e *Encoder) (err error) {
+ msg := string(s)
+ const subStart = "${"
+ hasHeader := false
+ p := 0
+ b := []byte{}
+ for {
+ i := strings.Index(msg[p:], subStart)
+ if i == -1 {
+ break
+ }
+ b = append(b, msg[p:p+i]...)
+ p += i + len(subStart)
+ if i = strings.IndexByte(msg[p:], '}'); i == -1 {
+ b = append(b, "$!(MISSINGBRACE)"...)
+ err = fmt.Errorf("catmsg: missing '}'")
+ p = len(msg)
+ break
+ }
+ name := strings.TrimSpace(msg[p : p+i])
+ if q := strings.IndexByte(name, '('); q == -1 {
+ if !hasHeader {
+ hasHeader = true
+ e.EncodeMessageType(msgString)
+ }
+ e.EncodeString(string(b))
+ e.EncodeSubstitution(name)
+ b = b[:0]
+ } else if j := strings.IndexByte(name[q:], ')'); j == -1 {
+ // TODO: what should the error be?
+ b = append(b, "$!(MISSINGPAREN)"...)
+ err = fmt.Errorf("catmsg: missing ')'")
+ } else if x, sErr := strconv.ParseUint(strings.TrimSpace(name[q+1:q+j]), 10, 32); sErr != nil {
+ // TODO: handle more than one argument
+ b = append(b, "$!(BADNUM)"...)
+ err = fmt.Errorf("catmsg: invalid number %q", strings.TrimSpace(name[q+1:q+j]))
+ } else {
+ if !hasHeader {
+ hasHeader = true
+ e.EncodeMessageType(msgString)
+ }
+ e.EncodeString(string(b))
+ e.EncodeSubstitution(name[:q], int(x))
+ b = b[:0]
+ }
+ p += i + 1
+ }
+ b = append(b, msg[p:]...)
+ if !hasHeader {
+ // Simplify string to a raw string.
+ Raw(string(b)).Compile(e)
+ } else if len(b) > 0 {
+ e.EncodeString(string(b))
+ }
+ return err
+}
+
+// Affix is a message that adds a prefix and suffix to another message.
+// This is mostly used to add back whitespace to a translation that was stripped
+// before sending it out.
+type Affix struct {
+ Message Message
+ Prefix string
+ Suffix string
+}
+
+// Compile implements Message.
+func (a Affix) Compile(e *Encoder) (err error) {
+ // TODO: consider adding a special message type that just adds a single
+ // return. This is probably common enough to handle the majority of cases.
+ // Get some stats first, though.
+ e.EncodeMessageType(msgAffix)
+ e.EncodeString(a.Prefix)
+ e.EncodeString(a.Suffix)
+ e.EncodeMessage(a.Message)
+ return nil
+}
diff --git a/vendor/golang.org/x/text/internal/catmsg/codec.go b/vendor/golang.org/x/text/internal/catmsg/codec.go
new file mode 100644
index 0000000000..49c9fc9789
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/catmsg/codec.go
@@ -0,0 +1,407 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package catmsg
+
+import (
+ "errors"
+ "fmt"
+
+ "golang.org/x/text/language"
+)
+
+// A Renderer renders a Message.
+type Renderer interface {
+ // Render renders the given string. The given string may be interpreted as a
+ // format string, such as the one used by the fmt package or a template.
+ Render(s string)
+
+ // Arg returns the i-th argument passed to format a message. This method
+ // should return nil if there is no such argument. Messages need access to
+ // arguments to allow selecting a message based on linguistic features of
+ // those arguments.
+ Arg(i int) interface{}
+}
+
+// A Dictionary specifies a source of messages, including variables or macros.
+type Dictionary interface {
+ // Lookup returns the message for the given key. It returns false for ok if
+ // such a message could not be found.
+ Lookup(key string) (data string, ok bool)
+
+ // TODO: consider returning an interface, instead of a string. This will
+ // allow implementations to do their own message type decoding.
+}
+
+// An Encoder serializes a Message to a string.
+type Encoder struct {
+ // The root encoder is used for storing encoded variables.
+ root *Encoder
+ // The parent encoder provides the surrounding scopes for resolving variable
+ // names.
+ parent *Encoder
+
+ tag language.Tag
+
+ // buf holds the encoded message so far. After a message completes encoding,
+ // the contents of buf, prefixed by the encoded length, are flushed to the
+ // parent buffer.
+ buf []byte
+
+ // vars is the lookup table of variables in the current scope.
+ vars []keyVal
+
+ err error
+ inBody bool // if false next call must be EncodeMessageType
+}
+
+type keyVal struct {
+ key string
+ offset int
+}
+
+// Language reports the language for which the encoded message will be stored
+// in the Catalog.
+func (e *Encoder) Language() language.Tag { return e.tag }
+
+func (e *Encoder) setError(err error) {
+ if e.root.err == nil {
+ e.root.err = err
+ }
+}
+
+// EncodeUint encodes x.
+func (e *Encoder) EncodeUint(x uint64) {
+ e.checkInBody()
+ var buf [maxVarintBytes]byte
+ n := encodeUint(buf[:], x)
+ e.buf = append(e.buf, buf[:n]...)
+}
+
+// EncodeString encodes s.
+func (e *Encoder) EncodeString(s string) {
+ e.checkInBody()
+ e.EncodeUint(uint64(len(s)))
+ e.buf = append(e.buf, s...)
+}
+
+// EncodeMessageType marks the current message to be of type h.
+//
+// It must be the first call of a Message's Compile method.
+func (e *Encoder) EncodeMessageType(h Handle) {
+ if e.inBody {
+ panic("catmsg: EncodeMessageType not the first method called")
+ }
+ e.inBody = true
+ e.EncodeUint(uint64(h))
+}
+
+// EncodeMessage serializes the given message inline at the current position.
+func (e *Encoder) EncodeMessage(m Message) error {
+ e = &Encoder{root: e.root, parent: e, tag: e.tag}
+ err := m.Compile(e)
+ if _, ok := m.(*Var); !ok {
+ e.flushTo(e.parent)
+ }
+ return err
+}
+
+func (e *Encoder) checkInBody() {
+ if !e.inBody {
+ panic("catmsg: expected prior call to EncodeMessageType")
+ }
+}
+
+// stripPrefix indicates the number of prefix bytes that must be stripped to
+// turn a single-element sequence into a message that is just this single member
+// without its size prefix. If the message can be stripped, b[1:n] contains the
+// size prefix.
+func stripPrefix(b []byte) (n int) {
+ if len(b) > 0 && Handle(b[0]) == msgFirst {
+ x, n, _ := decodeUint(b[1:])
+ if 1+n+int(x) == len(b) {
+ return 1 + n
+ }
+ }
+ return 0
+}
+
+func (e *Encoder) flushTo(dst *Encoder) {
+ data := e.buf
+ p := stripPrefix(data)
+ if p > 0 {
+ data = data[1:]
+ } else {
+ // Prefix the size.
+ dst.EncodeUint(uint64(len(data)))
+ }
+ dst.buf = append(dst.buf, data...)
+}
+
+func (e *Encoder) addVar(key string, m Message) error {
+ for _, v := range e.parent.vars {
+ if v.key == key {
+ err := fmt.Errorf("catmsg: duplicate variable %q", key)
+ e.setError(err)
+ return err
+ }
+ }
+ scope := e.parent
+ // If a variable message is Incomplete, and does not evaluate to a message
+ // during execution, we fall back to the variable name. We encode this by
+ // appending the variable name if the message reports it's incomplete.
+
+ err := m.Compile(e)
+ if err != ErrIncomplete {
+ e.setError(err)
+ }
+ switch {
+ case len(e.buf) == 1 && Handle(e.buf[0]) == msgFirst: // empty sequence
+ e.buf = e.buf[:0]
+ e.inBody = false
+ fallthrough
+ case len(e.buf) == 0:
+ // Empty message.
+ if err := String(key).Compile(e); err != nil {
+ e.setError(err)
+ }
+ case err == ErrIncomplete:
+ if Handle(e.buf[0]) != msgFirst {
+ seq := &Encoder{root: e.root, parent: e}
+ seq.EncodeMessageType(msgFirst)
+ e.flushTo(seq)
+ e = seq
+ }
+ // e contains a sequence; append the fallback string.
+ e.EncodeMessage(String(key))
+ }
+
+ // Flush result to variable heap.
+ offset := len(e.root.buf)
+ e.flushTo(e.root)
+ e.buf = e.buf[:0]
+
+ // Record variable offset in current scope.
+ scope.vars = append(scope.vars, keyVal{key: key, offset: offset})
+ return err
+}
+
+const (
+ substituteVar = iota
+ substituteMacro
+ substituteError
+)
+
+// EncodeSubstitution inserts a resolved reference to a variable or macro.
+//
+// This call must be matched with a call to ExecuteSubstitution at decoding
+// time.
+func (e *Encoder) EncodeSubstitution(name string, arguments ...int) {
+ if arity := len(arguments); arity > 0 {
+ // TODO: also resolve macros.
+ e.EncodeUint(substituteMacro)
+ e.EncodeString(name)
+ for _, a := range arguments {
+ e.EncodeUint(uint64(a))
+ }
+ return
+ }
+ for scope := e; scope != nil; scope = scope.parent {
+ for _, v := range scope.vars {
+ if v.key != name {
+ continue
+ }
+ e.EncodeUint(substituteVar) // TODO: support arity > 0
+ e.EncodeUint(uint64(v.offset))
+ return
+ }
+ }
+ // TODO: refer to dictionary-wide scoped variables.
+ e.EncodeUint(substituteError)
+ e.EncodeString(name)
+ e.setError(fmt.Errorf("catmsg: unknown var %q", name))
+}
+
+// A Decoder deserializes and evaluates messages that are encoded by an encoder.
+type Decoder struct {
+ tag language.Tag
+ dst Renderer
+ macros Dictionary
+
+ err error
+ vars string
+ data string
+
+ macroArg int // TODO: allow more than one argument
+}
+
+// NewDecoder returns a new Decoder.
+//
+// Decoders are designed to be reused for multiple invocations of Execute.
+// Only one goroutine may call Execute concurrently.
+func NewDecoder(tag language.Tag, r Renderer, macros Dictionary) *Decoder {
+ return &Decoder{
+ tag: tag,
+ dst: r,
+ macros: macros,
+ }
+}
+
+func (d *Decoder) setError(err error) {
+ if d.err == nil {
+ d.err = err
+ }
+}
+
+// Language returns the language in which the message is being rendered.
+//
+// The destination language may be a child language of the language used for
+// encoding. For instance, a decoding language of "pt-PT" is consistent with an
+// encoding language of "pt".
+func (d *Decoder) Language() language.Tag { return d.tag }
+
+// Done reports whether there are more bytes to process in this message.
+func (d *Decoder) Done() bool { return len(d.data) == 0 }
+
+// Render implements Renderer.
+func (d *Decoder) Render(s string) { d.dst.Render(s) }
+
+// Arg implements Renderer.
+//
+// During evaluation of macros, the argument positions may be mapped to
+// arguments that differ from the original call.
+func (d *Decoder) Arg(i int) interface{} {
+ if d.macroArg != 0 {
+ if i != 1 {
+ panic("catmsg: only macros with single argument supported")
+ }
+ i = d.macroArg
+ }
+ return d.dst.Arg(i)
+}
+
+// DecodeUint decodes a number that was encoded with EncodeUint and advances the
+// position.
+func (d *Decoder) DecodeUint() uint64 {
+ x, n, err := decodeUintString(d.data)
+ d.data = d.data[n:]
+ if err != nil {
+ d.setError(err)
+ }
+ return x
+}
+
+// DecodeString decodes a string that was encoded with EncodeString and advances
+// the position.
+func (d *Decoder) DecodeString() string {
+ size := d.DecodeUint()
+ s := d.data[:size]
+ d.data = d.data[size:]
+ return s
+}
+
+// SkipMessage skips the message at the current location and advances the
+// position.
+func (d *Decoder) SkipMessage() {
+ n := int(d.DecodeUint())
+ d.data = d.data[n:]
+}
+
+// Execute decodes and evaluates msg.
+//
+// Only one goroutine may call execute.
+func (d *Decoder) Execute(msg string) error {
+ d.err = nil
+ if !d.execute(msg) {
+ return ErrNoMatch
+ }
+ return d.err
+}
+
+func (d *Decoder) execute(msg string) bool {
+ saved := d.data
+ d.data = msg
+ ok := d.executeMessage()
+ d.data = saved
+ return ok
+}
+
+// executeMessageFromData is like execute, but also decodes a leading message
+// size and clips the given string accordingly.
+//
+// It reports the number of bytes consumed and whether a message was selected.
+func (d *Decoder) executeMessageFromData(s string) (n int, ok bool) {
+ saved := d.data
+ d.data = s
+ size := int(d.DecodeUint())
+ n = len(s) - len(d.data)
+ // Sanitize the setting. This allows skipping a size argument for
+ // RawString and method Done.
+ d.data = d.data[:size]
+ ok = d.executeMessage()
+ n += size - len(d.data)
+ d.data = saved
+ return n, ok
+}
+
+var errUnknownHandler = errors.New("catmsg: string contains unsupported handler")
+
+// executeMessage reads the handle id, initializes the decoder and executes the
+// message. It is assumed that all of d.data is the single message.
+func (d *Decoder) executeMessage() bool {
+ if d.Done() {
+ // We interpret no data as a valid empty message.
+ return true
+ }
+ handle := d.DecodeUint()
+
+ var fn Handler
+ mutex.Lock()
+ if int(handle) < len(handlers) {
+ fn = handlers[handle]
+ }
+ mutex.Unlock()
+ if fn == nil {
+ d.setError(errUnknownHandler)
+ d.execute(fmt.Sprintf("\x02$!(UNKNOWNMSGHANDLER=%#x)", handle))
+ return true
+ }
+ return fn(d)
+}
+
+// ExecuteMessage decodes and executes the message at the current position.
+func (d *Decoder) ExecuteMessage() bool {
+ n, ok := d.executeMessageFromData(d.data)
+ d.data = d.data[n:]
+ return ok
+}
+
+// ExecuteSubstitution executes the message corresponding to the substitution
+// as encoded by EncodeSubstitution.
+func (d *Decoder) ExecuteSubstitution() {
+ switch x := d.DecodeUint(); x {
+ case substituteVar:
+ offset := d.DecodeUint()
+ d.executeMessageFromData(d.vars[offset:])
+ case substituteMacro:
+ name := d.DecodeString()
+ data, ok := d.macros.Lookup(name)
+ old := d.macroArg
+ // TODO: support macros of arity other than 1.
+ d.macroArg = int(d.DecodeUint())
+ switch {
+ case !ok:
+ // TODO: detect this at creation time.
+ d.setError(fmt.Errorf("catmsg: undefined macro %q", name))
+ fallthrough
+ case !d.execute(data):
+ d.dst.Render(name) // fall back to macro name.
+ }
+ d.macroArg = old
+ case substituteError:
+ d.dst.Render(d.DecodeString())
+ default:
+ panic("catmsg: unreachable")
+ }
+}
diff --git a/vendor/golang.org/x/text/internal/catmsg/varint.go b/vendor/golang.org/x/text/internal/catmsg/varint.go
new file mode 100644
index 0000000000..a2cee2cf5b
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/catmsg/varint.go
@@ -0,0 +1,62 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package catmsg
+
+// This file implements varint encoding analogous to the one in encoding/binary.
+// We need a string version of this function, so we add that here and then add
+// the rest for consistency.
+
+import "errors"
+
+var (
+ errIllegalVarint = errors.New("catmsg: illegal varint")
+ errVarintTooLarge = errors.New("catmsg: varint too large for uint64")
+)
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// encodeUint encodes x as a variable-sized integer into buf and returns the
+// number of bytes written. buf must be at least maxVarintBytes long.
+func encodeUint(buf []byte, x uint64) (n int) {
+ for ; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return n
+}
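+
+// For example, encodeUint writes 300 (binary 1_0010_1100) as the two bytes
+// 0xAC, 0x02: the low seven bits with the continuation bit set, followed by
+// the remaining bits.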
+
+func decodeUintString(s string) (x uint64, size int, err error) {
+ i := 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= len(s) {
+ return 0, i, errIllegalVarint
+ }
+ b := uint64(s[i])
+ i++
+ x |= (b & 0x7F) << shift
+ if b&0x80 == 0 {
+ return x, i, nil
+ }
+ }
+ return 0, i, errVarintTooLarge
+}
+
+func decodeUint(b []byte) (x uint64, size int, err error) {
+ i := 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= len(b) {
+ return 0, i, errIllegalVarint
+ }
+ c := uint64(b[i])
+ i++
+ x |= (c & 0x7F) << shift
+ if c&0x80 == 0 {
+ return x, i, nil
+ }
+ }
+ return 0, i, errVarintTooLarge
+}
diff --git a/vendor/golang.org/x/text/internal/format/format.go b/vendor/golang.org/x/text/internal/format/format.go
new file mode 100644
index 0000000000..ee1c57a3c5
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/format/format.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package format contains types for defining language-specific formatting of
+// values.
+//
+// This package is internal now, but will eventually be exposed after the API
+// settles.
+package format // import "golang.org/x/text/internal/format"
+
+import (
+ "fmt"
+
+ "golang.org/x/text/language"
+)
+
+// State represents the printer state passed to custom formatters. It provides
+// access to the fmt.State interface and the sentence and language-related
+// context.
+type State interface {
+ fmt.State
+
+ // Language reports the requested language in which to render a message.
+ Language() language.Tag
+
+ // TODO: consider this and removing rune from the Format method in the
+ // Formatter interface.
+ //
+ // Verb returns the format variant to render, analogous to the types used
+ // in fmt. Use 'v' for the default or only variant.
+ // Verb() rune
+
+ // TODO: more info:
+ // - sentence context such as linguistic features passed by the translator.
+}
+
+// Formatter is analogous to fmt.Formatter.
+type Formatter interface {
+ Format(state State, verb rune)
+}
diff --git a/vendor/golang.org/x/text/internal/format/parser.go b/vendor/golang.org/x/text/internal/format/parser.go
new file mode 100644
index 0000000000..855aed71db
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/format/parser.go
@@ -0,0 +1,358 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package format
+
+import (
+ "reflect"
+ "unicode/utf8"
+)
+
+// A Parser parses a format string. The results of the parse are set in the
+// struct fields.
+type Parser struct {
+ Verb rune
+
+ WidthPresent bool
+ PrecPresent bool
+ Minus bool
+ Plus bool
+ Sharp bool
+ Space bool
+ Zero bool
+
+ // For the formats %+v %#v, we set the plusV/sharpV flags
+ // and clear the plus/sharp flags since %+v and %#v are in effect
+ // different, flagless formats set at the top level.
+ PlusV bool
+ SharpV bool
+
+ HasIndex bool
+
+ Width int
+ Prec int // precision
+
+ // retain arguments across calls.
+ Args []interface{}
+ // retain current argument number across calls
+ ArgNum int
+
+	// Reordered records whether the format string used argument reordering.
+ Reordered bool
+ // goodArgNum records whether the most recent reordering directive was valid.
+ goodArgNum bool
+
+ // position info
+ format string
+ startPos int
+ endPos int
+ Status Status
+}
+
+// Reset initializes a parser to scan format strings for the given args.
+func (p *Parser) Reset(args []interface{}) {
+ p.Args = args
+ p.ArgNum = 0
+ p.startPos = 0
+ p.Reordered = false
+}
+
+// Text returns the part of the format string that was parsed by the last call
+// to Scan. It returns the original substitution clause if the current scan
+// parsed a substitution.
+func (p *Parser) Text() string { return p.format[p.startPos:p.endPos] }
+
+// SetFormat sets a new format string to parse. It does not reset the argument
+// count.
+func (p *Parser) SetFormat(format string) {
+ p.format = format
+ p.startPos = 0
+ p.endPos = 0
+}
+
+// Status indicates the result type of a call to Scan.
+type Status int
+
+const (
+ StatusText Status = iota
+ StatusSubstitution
+ StatusBadWidthSubstitution
+ StatusBadPrecSubstitution
+ StatusNoVerb
+ StatusBadArgNum
+ StatusMissingArg
+)
+
+// ClearFlags resets the parser to default behavior.
+func (p *Parser) ClearFlags() {
+ p.WidthPresent = false
+ p.PrecPresent = false
+ p.Minus = false
+ p.Plus = false
+ p.Sharp = false
+ p.Space = false
+ p.Zero = false
+
+ p.PlusV = false
+ p.SharpV = false
+
+ p.HasIndex = false
+}
+
+// Scan scans the next part of the format string and sets the status to
+// indicate whether it scanned a string literal, substitution or error.
+func (p *Parser) Scan() bool {
+ p.Status = StatusText
+ format := p.format
+ end := len(format)
+ if p.endPos >= end {
+ return false
+ }
+ afterIndex := false // previous item in format was an index like [3].
+
+ p.startPos = p.endPos
+ p.goodArgNum = true
+ i := p.startPos
+ for i < end && format[i] != '%' {
+ i++
+ }
+ if i > p.startPos {
+ p.endPos = i
+ return true
+ }
+ // Process one verb
+ i++
+
+ p.Status = StatusSubstitution
+
+ // Do we have flags?
+ p.ClearFlags()
+
+simpleFormat:
+ for ; i < end; i++ {
+ c := p.format[i]
+ switch c {
+ case '#':
+ p.Sharp = true
+ case '0':
+ p.Zero = !p.Minus // Only allow zero padding to the left.
+ case '+':
+ p.Plus = true
+ case '-':
+ p.Minus = true
+ p.Zero = false // Do not pad with zeros to the right.
+ case ' ':
+ p.Space = true
+ default:
+ // Fast path for common case of ascii lower case simple verbs
+ // without precision or width or argument indices.
+ if 'a' <= c && c <= 'z' && p.ArgNum < len(p.Args) {
+ if c == 'v' {
+ // Go syntax
+ p.SharpV = p.Sharp
+ p.Sharp = false
+ // Struct-field syntax
+ p.PlusV = p.Plus
+ p.Plus = false
+ }
+ p.Verb = rune(c)
+ p.ArgNum++
+ p.endPos = i + 1
+ return true
+ }
+ // Format is more complex than simple flags and a verb or is malformed.
+ break simpleFormat
+ }
+ }
+
+ // Do we have an explicit argument index?
+ i, afterIndex = p.updateArgNumber(format, i)
+
+ // Do we have width?
+ if i < end && format[i] == '*' {
+ i++
+ p.Width, p.WidthPresent = p.intFromArg()
+
+ if !p.WidthPresent {
+ p.Status = StatusBadWidthSubstitution
+ }
+
+ // We have a negative width, so take its value and ensure
+ // that the minus flag is set
+ if p.Width < 0 {
+ p.Width = -p.Width
+ p.Minus = true
+ p.Zero = false // Do not pad with zeros to the right.
+ }
+ afterIndex = false
+ } else {
+ p.Width, p.WidthPresent, i = parsenum(format, i, end)
+ if afterIndex && p.WidthPresent { // "%[3]2d"
+ p.goodArgNum = false
+ }
+ }
+
+ // Do we have precision?
+ if i+1 < end && format[i] == '.' {
+ i++
+ if afterIndex { // "%[3].2d"
+ p.goodArgNum = false
+ }
+ i, afterIndex = p.updateArgNumber(format, i)
+ if i < end && format[i] == '*' {
+ i++
+ p.Prec, p.PrecPresent = p.intFromArg()
+ // Negative precision arguments don't make sense
+ if p.Prec < 0 {
+ p.Prec = 0
+ p.PrecPresent = false
+ }
+ if !p.PrecPresent {
+ p.Status = StatusBadPrecSubstitution
+ }
+ afterIndex = false
+ } else {
+ p.Prec, p.PrecPresent, i = parsenum(format, i, end)
+ if !p.PrecPresent {
+ p.Prec = 0
+ p.PrecPresent = true
+ }
+ }
+ }
+
+ if !afterIndex {
+ i, afterIndex = p.updateArgNumber(format, i)
+ }
+ p.HasIndex = afterIndex
+
+ if i >= end {
+ p.endPos = i
+ p.Status = StatusNoVerb
+ return true
+ }
+
+ verb, w := utf8.DecodeRuneInString(format[i:])
+ p.endPos = i + w
+ p.Verb = verb
+
+ switch {
+ case verb == '%': // Percent does not absorb operands and ignores f.wid and f.prec.
+ p.startPos = p.endPos - 1
+ p.Status = StatusText
+ case !p.goodArgNum:
+ p.Status = StatusBadArgNum
+ case p.ArgNum >= len(p.Args): // No argument left over to print for the current verb.
+ p.Status = StatusMissingArg
+ p.ArgNum++
+ case verb == 'v':
+ // Go syntax
+ p.SharpV = p.Sharp
+ p.Sharp = false
+ // Struct-field syntax
+ p.PlusV = p.Plus
+ p.Plus = false
+ fallthrough
+ default:
+ p.ArgNum++
+ }
+ return true
+}
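+
+// An illustrative scanning loop, assuming a single integer argument:
+//
+//	p := &Parser{}
+//	p.Reset([]interface{}{3})
+//	p.SetFormat("%d apple(s)")
+//	for p.Scan() {
+//		switch p.Status {
+//		case StatusSubstitution:
+//			// first iteration: p.Text() == "%d", p.Verb == 'd'
+//		case StatusText:
+//			// second iteration: p.Text() == " apple(s)"
+//		}
+//	}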
+
+// intFromArg gets the ArgNumth element of Args. On return, isInt reports
+// whether the argument has integer type.
+func (p *Parser) intFromArg() (num int, isInt bool) {
+ if p.ArgNum < len(p.Args) {
+ arg := p.Args[p.ArgNum]
+ num, isInt = arg.(int) // Almost always OK.
+ if !isInt {
+ // Work harder.
+ switch v := reflect.ValueOf(arg); v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n := v.Int()
+ if int64(int(n)) == n {
+ num = int(n)
+ isInt = true
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n := v.Uint()
+ if int64(n) >= 0 && uint64(int(n)) == n {
+ num = int(n)
+ isInt = true
+ }
+ default:
+ // Already 0, false.
+ }
+ }
+ p.ArgNum++
+ if tooLarge(num) {
+ num = 0
+ isInt = false
+ }
+ }
+ return
+}
+
+// parseArgNumber returns the value of the bracketed number, minus 1
+// (explicit argument numbers are one-indexed but we want zero-indexed).
+// The opening bracket is known to be present at format[0].
+// The returned values are the index, the number of bytes to consume
+// up to the closing bracket, if present, and whether the number parsed
+// ok. The bytes to consume will be 1 if no closing bracket is present.
+func parseArgNumber(format string) (index int, wid int, ok bool) {
+ // There must be at least 3 bytes: [n].
+ if len(format) < 3 {
+ return 0, 1, false
+ }
+
+ // Find closing bracket.
+ for i := 1; i < len(format); i++ {
+ if format[i] == ']' {
+ width, ok, newi := parsenum(format, 1, i)
+ if !ok || newi != i {
+ return 0, i + 1, false
+ }
+ return width - 1, i + 1, true // arg numbers are one-indexed and skip paren.
+ }
+ }
+ return 0, 1, false
+}
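+
+// For example, parseArgNumber("[3]d") returns (2, 3, true): the one-indexed 3
+// becomes the zero-indexed 2, and the three bytes "[3]" are consumed.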
+
+// updateArgNumber sets the argument number to evaluate to the value of the
+// bracketed integer that begins format[i:], if any, leaving it unchanged
+// otherwise. It returns the new value of i, that is, the index of the next
+// byte of the format to process, and whether a bracketed index was found.
+func (p *Parser) updateArgNumber(format string, i int) (newi int, found bool) {
+ if len(format) <= i || format[i] != '[' {
+ return i, false
+ }
+ p.Reordered = true
+ index, wid, ok := parseArgNumber(format[i:])
+ if ok && 0 <= index && index < len(p.Args) {
+ p.ArgNum = index
+ return i + wid, true
+ }
+ p.goodArgNum = false
+ return i + wid, ok
+}
+
+// tooLarge reports whether the magnitude of the integer is
+// too large to be used as a formatting width or precision.
+func tooLarge(x int) bool {
+ const max int = 1e6
+ return x > max || x < -max
+}
+
+// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number present.
+func parsenum(s string, start, end int) (num int, isnum bool, newi int) {
+ if start >= end {
+ return 0, false, end
+ }
+ for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ {
+ if tooLarge(num) {
+ return 0, false, end // Overflow; crazy long number most likely.
+ }
+ num = num*10 + int(s[newi]-'0')
+ isnum = true
+ }
+ return
+}
diff --git a/vendor/golang.org/x/text/internal/internal.go b/vendor/golang.org/x/text/internal/internal.go
new file mode 100644
index 0000000000..3cddbbdda8
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/internal.go
@@ -0,0 +1,49 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains non-exported functionality that is used by
+// packages in the text repository.
+package internal // import "golang.org/x/text/internal"
+
+import (
+ "sort"
+
+ "golang.org/x/text/language"
+)
+
+// SortTags sorts tags in place.
+func SortTags(tags []language.Tag) {
+ sort.Sort(sorter(tags))
+}
+
+type sorter []language.Tag
+
+func (s sorter) Len() int {
+ return len(s)
+}
+
+func (s sorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s sorter) Less(i, j int) bool {
+ return s[i].String() < s[j].String()
+}
+
+// UniqueTags sorts and filters duplicate tags in place and returns a slice with
+// only unique tags.
+func UniqueTags(tags []language.Tag) []language.Tag {
+ if len(tags) <= 1 {
+ return tags
+ }
+ SortTags(tags)
+ k := 0
+ for i := 1; i < len(tags); i++ {
+ if tags[k].String() < tags[i].String() {
+ k++
+ tags[k] = tags[i]
+ }
+ }
+ return tags[:k+1]
+}
diff --git a/vendor/golang.org/x/text/internal/match.go b/vendor/golang.org/x/text/internal/match.go
new file mode 100644
index 0000000000..1cc004a6d5
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/match.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file contains matchers that implement CLDR inheritance.
+//
+// See https://unicode.org/reports/tr35/#Locale_Inheritance.
+//
+// Some of the inheritance described in this document is already handled by
+// the cldr package.
+
+import (
+ "golang.org/x/text/language"
+)
+
+// TODO: consider if (some of the) matching algorithm needs to be public after
+// getting some feel about what is generic and what is specific.
+
+// NewInheritanceMatcher returns a matcher that matches based on the inheritance
+// chain.
+//
+// The matcher uses canonicalization and the parent relationship to find a
+// match. The resulting match will always be either Und or a language with the
+// same language and script as the requested language. It will not match
+// languages for which there is understood to be mutual or one-directional
+// intelligibility.
+//
+// A Match will indicate an Exact match if the language matches after
+// canonicalization and High if the matched tag is a parent.
+func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher {
+ tags := &InheritanceMatcher{make(map[language.Tag]int)}
+ for i, tag := range t {
+ ct, err := language.All.Canonicalize(tag)
+ if err != nil {
+ ct = tag
+ }
+ tags.index[ct] = i
+ }
+ return tags
+}
+
+type InheritanceMatcher struct {
+ index map[language.Tag]int
+}
+
+func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) {
+ for _, t := range want {
+ ct, err := language.All.Canonicalize(t)
+ if err != nil {
+ ct = t
+ }
+ conf := language.Exact
+ for {
+ if index, ok := m.index[ct]; ok {
+ return ct, index, conf
+ }
+ if ct == language.Und {
+ break
+ }
+ ct = ct.Parent()
+ conf = language.High
+ }
+ }
+ return language.Und, 0, language.No
+}
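+
+// An illustrative match, assuming the supported tags are English and
+// Portuguese:
+//
+//	m := NewInheritanceMatcher([]language.Tag{language.English, language.Portuguese})
+//	tag, index, conf := m.Match(language.MustParse("pt-PT"))
+//	// tag == language.Portuguese, index == 1, conf == language.High, because
+//	// "pt" is reached by walking the parent chain of "pt-PT".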
diff --git a/vendor/golang.org/x/text/internal/number/common.go b/vendor/golang.org/x/text/internal/number/common.go
new file mode 100644
index 0000000000..a6e9c8e0d5
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/common.go
@@ -0,0 +1,55 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package number
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/language/compact"
+)
+
+// A system identifies a CLDR numbering system.
+type system byte
+
+type systemData struct {
+ id system
+ digitSize byte // number of UTF-8 bytes per digit
+ zero [utf8.UTFMax]byte // UTF-8 sequence of zero digit.
+}
+
+// A SymbolType identifies a symbol of a specific kind.
+type SymbolType int
+
+const (
+ SymDecimal SymbolType = iota
+ SymGroup
+ SymList
+ SymPercentSign
+ SymPlusSign
+ SymMinusSign
+ SymExponential
+ SymSuperscriptingExponent
+ SymPerMille
+ SymInfinity
+ SymNan
+ SymTimeSeparator
+
+ NumSymbolTypes
+)
+
+const hasNonLatnMask = 0x8000
+
+// symOffset is an offset into altSymData if the bit indicated by hasNonLatnMask
+// is not 0 (with this bit masked out), and an offset into symIndex otherwise.
+//
+// TODO: this type can be a byte again if we use an indirection into altsymData
+// and introduce an alt -> offset slice (the length of this will be number of
+// alternatives plus 1). This also allows getting rid of the compactTag field
+// in altSymData. In total this will save about 1K.
+type symOffset uint16
+
+type altSymData struct {
+ compactTag compact.ID
+ symIndex symOffset
+ system system
+}
diff --git a/vendor/golang.org/x/text/internal/number/decimal.go b/vendor/golang.org/x/text/internal/number/decimal.go
new file mode 100644
index 0000000000..e128cf3437
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/decimal.go
@@ -0,0 +1,500 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate stringer -type RoundingMode
+
+package number
+
+import (
+ "math"
+ "strconv"
+)
+
+// RoundingMode determines how a number is rounded to the desired precision.
+type RoundingMode byte
+
+const (
+ ToNearestEven RoundingMode = iota // towards the nearest integer, or towards an even number if equidistant.
+ ToNearestZero // towards the nearest integer, or towards zero if equidistant.
+ ToNearestAway // towards the nearest integer, or away from zero if equidistant.
+ ToPositiveInf // towards infinity
+ ToNegativeInf // towards negative infinity
+ ToZero // towards zero
+ AwayFromZero // away from zero
+ numModes
+)
+
+const maxIntDigits = 20
+
+// A Decimal represents a floating point number in decimal format.
+// Digits represents a number [0, 1.0), and the absolute value represented by
+// Decimal is Digits * 10^Exp. Leading and trailing zeros may be omitted and Exp
+// may point outside a valid position in Digits.
+//
+// Examples:
+//
+// Number Decimal
+// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5
+// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2
+// 12000 Digits: [1, 2], Exp: 5
+// 12000.00 Digits: [1, 2], Exp: 5
+// 0.00123 Digits: [1, 2, 3], Exp: -2
+// 0 Digits: [], Exp: 0
+type Decimal struct {
+ digits
+
+ buf [maxIntDigits]byte
+}
+
+type digits struct {
+ Digits []byte // mantissa digits, big-endian
+ Exp int32 // exponent
+ Neg bool
+ Inf bool // Takes precedence over Digits and Exp.
+ NaN bool // Takes precedence over Inf.
+}
+
+// Digits represents a floating point number represented in digits of the
+// base in which a number is to be displayed. It is similar to Decimal, but
+// keeps track of trailing fraction zeros and the comma placement for
+// engineering notation. Digits must have at least one digit.
+//
+// Examples:
+//
+// Number Decimal
+// decimal
+// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 End: 5
+// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 End: 5
+// 12000 Digits: [1, 2], Exp: 5 End: 5
+// 12000.00 Digits: [1, 2], Exp: 5 End: 7
+// 0.00123 Digits: [1, 2, 3], Exp: -2 End: 3
+// 0 Digits: [], Exp: 0 End: 1
+// scientific (actual exp is Exp - Comma)
+// 0e0 Digits: [0], Exp: 1, End: 1, Comma: 1
+// .0e0 Digits: [0], Exp: 0, End: 1, Comma: 0
+// 0.0e0 Digits: [0], Exp: 1, End: 2, Comma: 1
+// 1.23e4 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 1
+// .123e5 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 0
+// engineering
+// 12.3e3 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 2
+type Digits struct {
+ digits
+ // End indicates the end position of the number.
+ End int32 // For decimals Exp <= End. For scientific len(Digits) <= End.
+ // Comma is used for the comma position for scientific (always 0 or 1) and
+ // engineering notation (always 0, 1, 2, or 3).
+ Comma uint8
+ // IsScientific indicates whether this number is to be rendered as a
+ // scientific number.
+ IsScientific bool
+}
+
+func (d *Digits) NumFracDigits() int {
+ if d.Exp >= d.End {
+ return 0
+ }
+ return int(d.End - d.Exp)
+}
+
+// normalize returns a new Decimal with leading and trailing zeros removed.
+func (d *Decimal) normalize() (n Decimal) {
+ n = *d
+ b := n.Digits
+ // Strip leading zeros. Resulting number of digits is significant digits.
+ for len(b) > 0 && b[0] == 0 {
+ b = b[1:]
+ n.Exp--
+ }
+ // Strip trailing zeros
+ for len(b) > 0 && b[len(b)-1] == 0 {
+ b = b[:len(b)-1]
+ }
+ if len(b) == 0 {
+ n.Exp = 0
+ }
+ n.Digits = b
+ return n
+}
+
+func (d *Decimal) clear() {
+ b := d.Digits
+ if b == nil {
+ b = d.buf[:0]
+ }
+ *d = Decimal{}
+ d.Digits = b[:0]
+}
+
+func (x *Decimal) String() string {
+ if x.NaN {
+ return "NaN"
+ }
+ var buf []byte
+ if x.Neg {
+ buf = append(buf, '-')
+ }
+ if x.Inf {
+ buf = append(buf, "Inf"...)
+ return string(buf)
+ }
+ switch {
+ case len(x.Digits) == 0:
+ buf = append(buf, '0')
+ case x.Exp <= 0:
+ // 0.00ddd
+ buf = append(buf, "0."...)
+ buf = appendZeros(buf, -int(x.Exp))
+ buf = appendDigits(buf, x.Digits)
+
+ case /* 0 < */ int(x.Exp) < len(x.Digits):
+ // dd.ddd
+ buf = appendDigits(buf, x.Digits[:x.Exp])
+ buf = append(buf, '.')
+ buf = appendDigits(buf, x.Digits[x.Exp:])
+
+ default: // len(x.Digits) <= x.Exp
+ // ddd00
+ buf = appendDigits(buf, x.Digits)
+ buf = appendZeros(buf, int(x.Exp)-len(x.Digits))
+ }
+ return string(buf)
+}
+
+func appendDigits(buf []byte, digits []byte) []byte {
+ for _, c := range digits {
+ buf = append(buf, c+'0')
+ }
+ return buf
+}
+
+// appendZeros appends n 0 digits to buf and returns buf.
+func appendZeros(buf []byte, n int) []byte {
+ for ; n > 0; n-- {
+ buf = append(buf, '0')
+ }
+ return buf
+}
+
+func (d *digits) round(mode RoundingMode, n int) {
+ if n >= len(d.Digits) {
+ return
+ }
+ // Make rounding decision: The result mantissa is truncated ("rounded down")
+ // by default. Decide if we need to increment, or "round up", the (unsigned)
+ // mantissa.
+ inc := false
+ switch mode {
+ case ToNegativeInf:
+ inc = d.Neg
+ case ToPositiveInf:
+ inc = !d.Neg
+ case ToZero:
+ // nothing to do
+ case AwayFromZero:
+ inc = true
+ case ToNearestEven:
+ inc = d.Digits[n] > 5 || d.Digits[n] == 5 &&
+ (len(d.Digits) > n+1 || n == 0 || d.Digits[n-1]&1 != 0)
+ case ToNearestAway:
+ inc = d.Digits[n] >= 5
+ case ToNearestZero:
+ inc = d.Digits[n] > 5 || d.Digits[n] == 5 && len(d.Digits) > n+1
+ default:
+ panic("unreachable")
+ }
+ if inc {
+ d.roundUp(n)
+ } else {
+ d.roundDown(n)
+ }
+}
+
+// roundFloat rounds a floating point number.
+func (r RoundingMode) roundFloat(x float64) float64 {
+ // Make rounding decision: The result mantissa is truncated ("rounded down")
+ // by default. Decide if we need to increment, or "round up", the (unsigned)
+ // mantissa.
+ abs := x
+ if x < 0 {
+ abs = -x
+ }
+ i, f := math.Modf(abs)
+ if f == 0.0 {
+ return x
+ }
+ inc := false
+ switch r {
+ case ToNegativeInf:
+ inc = x < 0
+ case ToPositiveInf:
+ inc = x >= 0
+ case ToZero:
+ // nothing to do
+ case AwayFromZero:
+ inc = true
+ case ToNearestEven:
+ // TODO: check overflow
+ inc = f > 0.5 || f == 0.5 && int64(i)&1 != 0
+ case ToNearestAway:
+ inc = f >= 0.5
+ case ToNearestZero:
+ inc = f > 0.5
+ default:
+ panic("unreachable")
+ }
+ if inc {
+ i += 1
+ }
+ if abs != x {
+ i = -i
+ }
+ return i
+}
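+
+// For example, ToNearestEven.roundFloat(2.5) == 2 while
+// ToNearestEven.roundFloat(3.5) == 4, since ties are broken towards the even
+// integer.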
+
+func (x *digits) roundUp(n int) {
+ if n < 0 || n >= len(x.Digits) {
+ return // nothing to do
+ }
+ // find first digit < 9
+ for n > 0 && x.Digits[n-1] >= 9 {
+ n--
+ }
+
+ if n == 0 {
+ // all digits are 9s => round up to 1 and update exponent
+ x.Digits[0] = 1 // ok since len(x.Digits) > n
+ x.Digits = x.Digits[:1]
+ x.Exp++
+ return
+ }
+ x.Digits[n-1]++
+ x.Digits = x.Digits[:n]
+ // x already trimmed
+}
+
+func (x *digits) roundDown(n int) {
+ if n < 0 || n >= len(x.Digits) {
+ return // nothing to do
+ }
+ x.Digits = x.Digits[:n]
+ trim(x)
+}
+
+// trim cuts off any trailing zeros from x's mantissa;
+// they are meaningless for the value of x.
+func trim(x *digits) {
+ i := len(x.Digits)
+ for i > 0 && x.Digits[i-1] == 0 {
+ i--
+ }
+ x.Digits = x.Digits[:i]
+ if i == 0 {
+ x.Exp = 0
+ }
+}
+
+// A Converter converts a number into decimals according to the given rounding
+// criteria.
+type Converter interface {
+ Convert(d *Decimal, r RoundingContext)
+}
+
+const (
+ signed = true
+ unsigned = false
+)
+
+// Convert converts the given number to the decimal representation using the
+// supplied RoundingContext.
+func (d *Decimal) Convert(r RoundingContext, number interface{}) {
+ switch f := number.(type) {
+ case Converter:
+ d.clear()
+ f.Convert(d, r)
+ case float32:
+ d.ConvertFloat(r, float64(f), 32)
+ case float64:
+ d.ConvertFloat(r, f, 64)
+ case int:
+ d.ConvertInt(r, signed, uint64(f))
+ case int8:
+ d.ConvertInt(r, signed, uint64(f))
+ case int16:
+ d.ConvertInt(r, signed, uint64(f))
+ case int32:
+ d.ConvertInt(r, signed, uint64(f))
+ case int64:
+ d.ConvertInt(r, signed, uint64(f))
+ case uint:
+ d.ConvertInt(r, unsigned, uint64(f))
+ case uint8:
+ d.ConvertInt(r, unsigned, uint64(f))
+ case uint16:
+ d.ConvertInt(r, unsigned, uint64(f))
+ case uint32:
+ d.ConvertInt(r, unsigned, uint64(f))
+ case uint64:
+ d.ConvertInt(r, unsigned, f)
+
+ default:
+ d.NaN = true
+ // TODO:
+ // case string: if produced by strconv, allows for easy arbitrary pos.
+ // case reflect.Value:
+ // case big.Float
+ // case big.Int
+ // case big.Rat?
+ // catch underlyings using reflect or will this already be done by the
+ // message package?
+ }
+}
+
+// ConvertInt converts an integer to decimals.
+func (d *Decimal) ConvertInt(r RoundingContext, signed bool, x uint64) {
+ if r.Increment > 0 {
+ // TODO: if uint64 is too large, fall back to float64
+ if signed {
+ d.ConvertFloat(r, float64(int64(x)), 64)
+ } else {
+ d.ConvertFloat(r, float64(x), 64)
+ }
+ return
+ }
+ d.clear()
+ if signed && int64(x) < 0 {
+ x = uint64(-int64(x))
+ d.Neg = true
+ }
+ d.fillIntDigits(x)
+ d.Exp = int32(len(d.Digits))
+}
+
+// ConvertFloat converts a floating point number to decimals.
+func (d *Decimal) ConvertFloat(r RoundingContext, x float64, size int) {
+ d.clear()
+ if math.IsNaN(x) {
+ d.NaN = true
+ return
+ }
+ // Simple case: decimal notation
+ if r.Increment > 0 {
+ scale := int(r.IncrementScale)
+ mult := 1.0
+ if scale >= len(scales) {
+ mult = math.Pow(10, float64(scale))
+ } else {
+ mult = scales[scale]
+ }
+ // We multiply x instead of dividing inc as it gives less rounding
+ // issues.
+ x *= mult
+ x /= float64(r.Increment)
+ x = r.Mode.roundFloat(x)
+ x *= float64(r.Increment)
+ x /= mult
+ }
+
+ abs := x
+ if x < 0 {
+ d.Neg = true
+ abs = -x
+ }
+ if math.IsInf(abs, 1) {
+ d.Inf = true
+ return
+ }
+
+ // By default we get the exact decimal representation.
+ verb := byte('g')
+ prec := -1
+ // As the strconv API does not return the rounding accuracy, we can only
+ // round using ToNearestEven.
+ if r.Mode == ToNearestEven {
+ if n := r.RoundSignificantDigits(); n >= 0 {
+ prec = n
+ } else if n = r.RoundFractionDigits(); n >= 0 {
+ prec = n
+ verb = 'f'
+ }
+ } else {
+ // TODO: At this point strconv's rounding is imprecise to the point that
+ // it is not usable for this purpose.
+ // See https://github.com/golang/go/issues/21714
+ // If rounding is requested, we ask for a large number of digits and
+ // round from there to simulate rounding only once.
+ // Ideally we would have strconv export an AppendDigits that would take
+ // a rounding mode and/or return an accuracy. Something like this would
+ // work:
+ // AppendDigits(dst []byte, x float64, base, size, prec int) (digits []byte, exp, accuracy int)
+ hasPrec := r.RoundSignificantDigits() >= 0
+ hasScale := r.RoundFractionDigits() >= 0
+ if hasPrec || hasScale {
+ // prec is the number of mantissa bits plus some extra for safety.
+ // We need at least the number of mantissa bits as decimals to
+ // accurately represent the floating point without rounding, as each
+ // bit requires one more decimal to represent: 0.5, 0.25, 0.125, ...
+ prec = 60
+ }
+ }
+
+ b := strconv.AppendFloat(d.Digits[:0], abs, verb, prec, size)
+ i := 0
+ k := 0
+ beforeDot := 1
+ for i < len(b) {
+ if c := b[i]; '0' <= c && c <= '9' {
+ b[k] = c - '0'
+ k++
+ d.Exp += int32(beforeDot)
+ } else if c == '.' {
+ beforeDot = 0
+ d.Exp = int32(k)
+ } else {
+ break
+ }
+ i++
+ }
+ d.Digits = b[:k]
+ if i != len(b) {
+ i += len("e")
+ pSign := i
+ exp := 0
+ for i++; i < len(b); i++ {
+ exp *= 10
+ exp += int(b[i] - '0')
+ }
+ if b[pSign] == '-' {
+ exp = -exp
+ }
+ d.Exp = int32(exp) + 1
+ }
+}
+
+func (d *Decimal) fillIntDigits(x uint64) {
+ if cap(d.Digits) < maxIntDigits {
+ d.Digits = d.buf[:]
+ } else {
+ d.Digits = d.buf[:maxIntDigits]
+ }
+ i := 0
+ for ; x > 0; x /= 10 {
+ d.Digits[i] = byte(x % 10)
+ i++
+ }
+ d.Digits = d.Digits[:i]
+ for p := 0; p < i; p++ {
+ i--
+ d.Digits[p], d.Digits[i] = d.Digits[i], d.Digits[p]
+ }
+}
+
+var scales [70]float64
+
+func init() {
+ x := 1.0
+ for i := range scales {
+ scales[i] = x
+ x *= 10
+ }
+}
diff --git a/vendor/golang.org/x/text/internal/number/format.go b/vendor/golang.org/x/text/internal/number/format.go
new file mode 100644
index 0000000000..cd94c5dc4e
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/format.go
@@ -0,0 +1,535 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package number
+
+import (
+ "strconv"
+ "unicode/utf8"
+
+ "golang.org/x/text/language"
+)
+
+// TODO:
+// - grouping of fractions
+// - allow user-defined superscript notation (such as ⁴)
+// - same for non-breaking spaces, like 20 000
+
+// A VisibleDigits computes digits, comma placement and trailing zeros as they
+// will be shown to the user.
+type VisibleDigits interface {
+ Digits(buf []byte, t language.Tag, scale int) Digits
+ // TODO: Do we also need to add the verb or pass a format.State?
+}
+
+// Formatting proceeds along the following lines:
+// 0) Compose rounding information from format and context.
+// 1) Convert a number into a Decimal.
+// 2) Sanitize Decimal by adding trailing zeros, removing leading digits, and
+// (non-increment) rounding. The Decimal that results from this is suitable
+// for determining the plural form.
+// 3) Render the Decimal in the localized form.
+
+// Formatter contains all the information needed to render a number.
+type Formatter struct {
+ Pattern
+ Info
+}
+
+func (f *Formatter) init(t language.Tag, index []uint8) {
+ f.Info = InfoFromTag(t)
+ f.Pattern = formats[index[tagToID(t)]]
+}
+
+// InitPattern initializes a Formatter for the given Pattern.
+func (f *Formatter) InitPattern(t language.Tag, pat *Pattern) {
+ f.Info = InfoFromTag(t)
+ f.Pattern = *pat
+}
+
+// InitDecimal initializes a Formatter using the default Pattern for the given
+// language.
+func (f *Formatter) InitDecimal(t language.Tag) {
+ f.init(t, tagToDecimal)
+}
+
+// InitScientific initializes a Formatter using the default Pattern for the
+// given language.
+func (f *Formatter) InitScientific(t language.Tag) {
+ f.init(t, tagToScientific)
+ f.Pattern.MinFractionDigits = 0
+ f.Pattern.MaxFractionDigits = -1
+}
+
+// InitEngineering initializes a Formatter using the default Pattern for the
+// given language.
+func (f *Formatter) InitEngineering(t language.Tag) {
+ f.init(t, tagToScientific)
+ f.Pattern.MinFractionDigits = 0
+ f.Pattern.MaxFractionDigits = -1
+ f.Pattern.MaxIntegerDigits = 3
+ f.Pattern.MinIntegerDigits = 1
+}
+
+// InitPercent initializes a Formatter using the default Pattern for the given
+// language.
+func (f *Formatter) InitPercent(t language.Tag) {
+ f.init(t, tagToPercent)
+}
+
+// InitPerMille initializes a Formatter using the default Pattern for the given
+// language.
+func (f *Formatter) InitPerMille(t language.Tag) {
+ f.init(t, tagToPercent)
+ f.Pattern.DigitShift = 3
+}
+
+func (f *Formatter) Append(dst []byte, x interface{}) []byte {
+ var d Decimal
+ r := f.RoundingContext
+ d.Convert(r, x)
+ return f.Render(dst, FormatDigits(&d, r))
+}
+
+func FormatDigits(d *Decimal, r RoundingContext) Digits {
+ if r.isScientific() {
+ return scientificVisibleDigits(r, d)
+ }
+ return decimalVisibleDigits(r, d)
+}
+
+func (f *Formatter) Format(dst []byte, d *Decimal) []byte {
+ return f.Render(dst, FormatDigits(d, f.RoundingContext))
+}
+
+func (f *Formatter) Render(dst []byte, d Digits) []byte {
+ var result []byte
+ var postPrefix, preSuffix int
+ if d.IsScientific {
+ result, postPrefix, preSuffix = appendScientific(dst, f, &d)
+ } else {
+ result, postPrefix, preSuffix = appendDecimal(dst, f, &d)
+ }
+ if f.PadRune == 0 {
+ return result
+ }
+ width := int(f.FormatWidth)
+ if count := utf8.RuneCount(result); count < width {
+ insertPos := 0
+ switch f.Flags & PadMask {
+ case PadAfterPrefix:
+ insertPos = postPrefix
+ case PadBeforeSuffix:
+ insertPos = preSuffix
+ case PadAfterSuffix:
+ insertPos = len(result)
+ }
+ num := width - count
+ pad := [utf8.UTFMax]byte{' '}
+ sz := 1
+ if r := f.PadRune; r != 0 {
+ sz = utf8.EncodeRune(pad[:], r)
+ }
+ extra := sz * num
+ if n := len(result) + extra; n < cap(result) {
+ result = result[:n]
+ copy(result[insertPos+extra:], result[insertPos:])
+ } else {
+ buf := make([]byte, n)
+ copy(buf, result[:insertPos])
+ copy(buf[insertPos+extra:], result[insertPos:])
+ result = buf
+ }
+ for ; num > 0; num-- {
+ insertPos += copy(result[insertPos:], pad[:sz])
+ }
+ }
+ return result
+}
+
+// decimalVisibleDigits converts d according to the RoundingContext. Note that
+// the exponent may change as a result of this operation.
+func decimalVisibleDigits(r RoundingContext, d *Decimal) Digits {
+ if d.NaN || d.Inf {
+ return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}}
+ }
+ n := Digits{digits: d.normalize().digits}
+
+ exp := n.Exp
+ exp += int32(r.DigitShift)
+
+ // Cap integer digits. Remove *most-significant* digits.
+ if r.MaxIntegerDigits > 0 {
+ if p := int(exp) - int(r.MaxIntegerDigits); p > 0 {
+ if p > len(n.Digits) {
+ p = len(n.Digits)
+ }
+ if n.Digits = n.Digits[p:]; len(n.Digits) == 0 {
+ exp = 0
+ } else {
+ exp -= int32(p)
+ }
+ // Strip leading zeros.
+ for len(n.Digits) > 0 && n.Digits[0] == 0 {
+ n.Digits = n.Digits[1:]
+ exp--
+ }
+ }
+ }
+
+ // Rounding if not already done by Convert.
+ p := len(n.Digits)
+ if maxSig := int(r.MaxSignificantDigits); maxSig > 0 {
+ p = maxSig
+ }
+ if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 {
+ if cap := int(exp) + maxFrac; cap < p {
+ p = int(exp) + maxFrac
+ }
+ if p < 0 {
+ p = 0
+ }
+ }
+ n.round(r.Mode, p)
+
+ // set End (trailing zeros)
+ n.End = int32(len(n.Digits))
+ if n.End == 0 {
+ exp = 0
+ if r.MinFractionDigits > 0 {
+ n.End = int32(r.MinFractionDigits)
+ }
+ if p := int32(r.MinSignificantDigits) - 1; p > n.End {
+ n.End = p
+ }
+ } else {
+ if end := exp + int32(r.MinFractionDigits); end > n.End {
+ n.End = end
+ }
+ if n.End < int32(r.MinSignificantDigits) {
+ n.End = int32(r.MinSignificantDigits)
+ }
+ }
+ n.Exp = exp
+ return n
+}
+
+// appendDecimal appends a formatted number to dst. It returns two possible
+// insertion points for padding.
+func appendDecimal(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) {
+ if dst, ok := f.renderSpecial(dst, n); ok {
+ return dst, 0, len(dst)
+ }
+ digits := n.Digits
+ exp := n.Exp
+
+ // Split in integer and fraction part.
+ var intDigits, fracDigits []byte
+ numInt := 0
+ numFrac := int(n.End - n.Exp)
+ if exp > 0 {
+ numInt = int(exp)
+ if int(exp) >= len(digits) { // ddddd | ddddd00
+ intDigits = digits
+ } else { // ddd.dd
+ intDigits = digits[:exp]
+ fracDigits = digits[exp:]
+ }
+ } else {
+ fracDigits = digits
+ }
+
+ neg := n.Neg
+ affix, suffix := f.getAffixes(neg)
+ dst = appendAffix(dst, f, affix, neg)
+ savedLen := len(dst)
+
+ minInt := int(f.MinIntegerDigits)
+ if minInt == 0 && f.MinSignificantDigits > 0 {
+ minInt = 1
+ }
+ // add leading zeros
+ for i := minInt; i > numInt; i-- {
+ dst = f.AppendDigit(dst, 0)
+ if f.needsSep(i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+ i := 0
+ for ; i < len(intDigits); i++ {
+ dst = f.AppendDigit(dst, intDigits[i])
+ if f.needsSep(numInt - i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+ for ; i < numInt; i++ {
+ dst = f.AppendDigit(dst, 0)
+ if f.needsSep(numInt - i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+
+ if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 {
+ dst = append(dst, f.Symbol(SymDecimal)...)
+ }
+ // Add trailing zeros
+ i = 0
+ for n := -int(n.Exp); i < n; i++ {
+ dst = f.AppendDigit(dst, 0)
+ }
+ for _, d := range fracDigits {
+ i++
+ dst = f.AppendDigit(dst, d)
+ }
+ for ; i < numFrac; i++ {
+ dst = f.AppendDigit(dst, 0)
+ }
+ return appendAffix(dst, f, suffix, neg), savedLen, len(dst)
+}
+
+func scientificVisibleDigits(r RoundingContext, d *Decimal) Digits {
+ if d.NaN || d.Inf {
+ return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}}
+ }
+ n := Digits{digits: d.normalize().digits, IsScientific: true}
+
+ // Normalize to have at least one digit. This simplifies engineering
+ // notation.
+ if len(n.Digits) == 0 {
+ n.Digits = append(n.Digits, 0)
+ n.Exp = 1
+ }
+
+ // Significant digits are transformed by the parser for scientific notation
+ // and do not need to be handled here.
+ maxInt, numInt := int(r.MaxIntegerDigits), int(r.MinIntegerDigits)
+ if numInt == 0 {
+ numInt = 1
+ }
+
+ // If a maximum number of integers is specified, the minimum must be 1
+ // and the exponent is grouped by this number (e.g. for engineering)
+ if maxInt > numInt {
+ // Correct the exponent to reflect a single integer digit.
+ numInt = 1
+ // engineering
+ // 0.01234 ([12345]e-1) -> 1.2345e-2 12.345e-3
+ // 12345 ([12345]e+5) -> 1.2345e4 12.345e3
+ d := int(n.Exp-1) % maxInt
+ if d < 0 {
+ d += maxInt
+ }
+ numInt += d
+ }
+
+ p := len(n.Digits)
+ if maxSig := int(r.MaxSignificantDigits); maxSig > 0 {
+ p = maxSig
+ }
+ if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 && numInt+maxFrac < p {
+ p = numInt + maxFrac
+ }
+ n.round(r.Mode, p)
+
+ n.Comma = uint8(numInt)
+ n.End = int32(len(n.Digits))
+ if minSig := int32(r.MinFractionDigits) + int32(numInt); n.End < minSig {
+ n.End = minSig
+ }
+ return n
+}
+
+// appendScientific appends a formatted number to dst. It returns two possible
+// insertion points for padding.
+func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) {
+ if dst, ok := f.renderSpecial(dst, n); ok {
+ return dst, 0, 0
+ }
+ digits := n.Digits
+ numInt := int(n.Comma)
+ numFrac := int(n.End) - int(n.Comma)
+
+ var intDigits, fracDigits []byte
+ if numInt <= len(digits) {
+ intDigits = digits[:numInt]
+ fracDigits = digits[numInt:]
+ } else {
+ intDigits = digits
+ }
+ neg := n.Neg
+ affix, suffix := f.getAffixes(neg)
+ dst = appendAffix(dst, f, affix, neg)
+ savedLen := len(dst)
+
+ i := 0
+ for ; i < len(intDigits); i++ {
+ dst = f.AppendDigit(dst, intDigits[i])
+ if f.needsSep(numInt - i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+ for ; i < numInt; i++ {
+ dst = f.AppendDigit(dst, 0)
+ if f.needsSep(numInt - i) {
+ dst = append(dst, f.Symbol(SymGroup)...)
+ }
+ }
+
+ if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 {
+ dst = append(dst, f.Symbol(SymDecimal)...)
+ }
+ i = 0
+ for ; i < len(fracDigits); i++ {
+ dst = f.AppendDigit(dst, fracDigits[i])
+ }
+ for ; i < numFrac; i++ {
+ dst = f.AppendDigit(dst, 0)
+ }
+
+ // exp
+ buf := [12]byte{}
+ // TODO: use exponential if superscripting is not available (no Latin
+ // numbers or no tags) and use exponential in all other cases.
+ exp := n.Exp - int32(n.Comma)
+ exponential := f.Symbol(SymExponential)
+ if exponential == "E" {
+ dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE
+ dst = append(dst, f.Symbol(SymSuperscriptingExponent)...)
+ dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE
+ dst = f.AppendDigit(dst, 1)
+ dst = f.AppendDigit(dst, 0)
+ switch {
+ case exp < 0:
+ dst = append(dst, superMinus...)
+ exp = -exp
+ case f.Flags&AlwaysExpSign != 0:
+ dst = append(dst, superPlus...)
+ }
+ b = strconv.AppendUint(buf[:0], uint64(exp), 10)
+ for i := len(b); i < int(f.MinExponentDigits); i++ {
+ dst = append(dst, superDigits[0]...)
+ }
+ for _, c := range b {
+ dst = append(dst, superDigits[c-'0']...)
+ }
+ } else {
+ dst = append(dst, exponential...)
+ switch {
+ case exp < 0:
+ dst = append(dst, f.Symbol(SymMinusSign)...)
+ exp = -exp
+ case f.Flags&AlwaysExpSign != 0:
+ dst = append(dst, f.Symbol(SymPlusSign)...)
+ }
+ b = strconv.AppendUint(buf[:0], uint64(exp), 10)
+ for i := len(b); i < int(f.MinExponentDigits); i++ {
+ dst = f.AppendDigit(dst, 0)
+ }
+ for _, c := range b {
+ dst = f.AppendDigit(dst, c-'0')
+ }
+ }
+ return appendAffix(dst, f, suffix, neg), savedLen, len(dst)
+}
+
+const (
+ superMinus = "\u207B" // SUPERSCRIPT HYPHEN-MINUS
+ superPlus = "\u207A" // SUPERSCRIPT PLUS SIGN
+)
+
+var (
+ // Note: the digits are not sequential!!!
+ superDigits = []string{
+ "\u2070", // SUPERSCRIPT DIGIT ZERO
+ "\u00B9", // SUPERSCRIPT DIGIT ONE
+ "\u00B2", // SUPERSCRIPT DIGIT TWO
+ "\u00B3", // SUPERSCRIPT DIGIT THREE
+ "\u2074", // SUPERSCRIPT DIGIT FOUR
+ "\u2075", // SUPERSCRIPT DIGIT FIVE
+ "\u2076", // SUPERSCRIPT DIGIT SIX
+ "\u2077", // SUPERSCRIPT DIGIT SEVEN
+ "\u2078", // SUPERSCRIPT DIGIT EIGHT
+ "\u2079", // SUPERSCRIPT DIGIT NINE
+ }
+)
+
+func (f *Formatter) getAffixes(neg bool) (affix, suffix string) {
+ str := f.Affix
+ if str != "" {
+ if f.NegOffset > 0 {
+ if neg {
+ str = str[f.NegOffset:]
+ } else {
+ str = str[:f.NegOffset]
+ }
+ }
+ sufStart := 1 + str[0]
+ affix = str[1:sufStart]
+ suffix = str[sufStart+1:]
+ }
+ // TODO: introduce a NeedNeg sign to indicate if the left pattern already
+ // has a sign marked?
+ if f.NegOffset == 0 && (neg || f.Flags&AlwaysSign != 0) {
+ affix = "-" + affix
+ }
+ return affix, suffix
+}
+
+func (f *Formatter) renderSpecial(dst []byte, d *Digits) (b []byte, ok bool) {
+ if d.NaN {
+ return fmtNaN(dst, f), true
+ }
+ if d.Inf {
+ return fmtInfinite(dst, f, d), true
+ }
+ return dst, false
+}
+
+func fmtNaN(dst []byte, f *Formatter) []byte {
+ return append(dst, f.Symbol(SymNan)...)
+}
+
+func fmtInfinite(dst []byte, f *Formatter, d *Digits) []byte {
+ affix, suffix := f.getAffixes(d.Neg)
+ dst = appendAffix(dst, f, affix, d.Neg)
+ dst = append(dst, f.Symbol(SymInfinity)...)
+ dst = appendAffix(dst, f, suffix, d.Neg)
+ return dst
+}
+
+func appendAffix(dst []byte, f *Formatter, affix string, neg bool) []byte {
+ quoting := false
+ escaping := false
+ for _, r := range affix {
+ switch {
+ case escaping:
+ // escaping occurs both inside and outside of quotes
+ dst = append(dst, string(r)...)
+ escaping = false
+ case r == '\\':
+ escaping = true
+ case r == '\'':
+ quoting = !quoting
+ case quoting:
+ dst = append(dst, string(r)...)
+ case r == '%':
+ if f.DigitShift == 3 {
+ dst = append(dst, f.Symbol(SymPerMille)...)
+ } else {
+ dst = append(dst, f.Symbol(SymPercentSign)...)
+ }
+ case r == '-' || r == '+':
+ if neg {
+ dst = append(dst, f.Symbol(SymMinusSign)...)
+ } else if f.Flags&ElideSign == 0 {
+ dst = append(dst, f.Symbol(SymPlusSign)...)
+ } else {
+ dst = append(dst, ' ')
+ }
+ default:
+ dst = append(dst, string(r)...)
+ }
+ }
+ return dst
+}
diff --git a/vendor/golang.org/x/text/internal/number/number.go b/vendor/golang.org/x/text/internal/number/number.go
new file mode 100644
index 0000000000..e1d933c3f7
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/number.go
@@ -0,0 +1,152 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go gen_common.go
+
+// Package number contains tools and data for formatting numbers.
+package number
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/language/compact"
+ "golang.org/x/text/language"
+)
+
+// Info holds number formatting configuration data.
+type Info struct {
+ system systemData // numbering system information
+ symIndex symOffset // index to symbols
+}
+
+// InfoFromLangID returns an Info for the given compact language identifier and
+// numbering system identifier. If system is the empty string, the default
+// numbering system will be taken for that language.
+func InfoFromLangID(compactIndex compact.ID, numberSystem string) Info {
+ p := langToDefaults[compactIndex]
+ // Lookup the entry for the language.
+ pSymIndex := symOffset(0) // Default: Latin, default symbols
+ system, ok := systemMap[numberSystem]
+ if !ok {
+ // Take the value for the default numbering system. This is by far the
+ // most common case as an alternative numbering system is hardly used.
+ if p&hasNonLatnMask == 0 { // Latn digits.
+ pSymIndex = p
+ } else { // Non-Latn or multiple numbering systems.
+ // Take the first entry from the alternatives list.
+ data := langToAlt[p&^hasNonLatnMask]
+ pSymIndex = data.symIndex
+ system = data.system
+ }
+ } else {
+ langIndex := compactIndex
+ ns := system
+ outerLoop:
+ for ; ; p = langToDefaults[langIndex] {
+ if p&hasNonLatnMask == 0 {
+ if ns == 0 {
+ // The index directly points to the symbol data.
+ pSymIndex = p
+ break
+ }
+ // Move to the parent and retry.
+ langIndex = langIndex.Parent()
+ } else {
+ // The index points to a list of symbol data indexes.
+ for _, e := range langToAlt[p&^hasNonLatnMask:] {
+ if e.compactTag != langIndex {
+ if langIndex == 0 {
+ // The CLDR root defines full symbol information for
+ // all numbering systems (even though mostly by
+ // means of aliases). Fall back to the default entry
+ // for Latn if there is no data for the numbering
+ // system of this language.
+ if ns == 0 {
+ break
+ }
+ // Fall back to Latin and start from the original
+ // language. See
+ // https://unicode.org/reports/tr35/#Locale_Inheritance.
+ ns = numLatn
+ langIndex = compactIndex
+ continue outerLoop
+ }
+ // Fall back to parent.
+ langIndex = langIndex.Parent()
+ } else if e.system == ns {
+ pSymIndex = e.symIndex
+ break outerLoop
+ }
+ }
+ }
+ }
+ }
+ if int(system) >= len(numSysData) { // algorithmic
+ // Will generate ASCII digits in case the user inadvertently calls
+ // WriteDigit or Digit on it.
+ d := numSysData[0]
+ d.id = system
+ return Info{
+ system: d,
+ symIndex: pSymIndex,
+ }
+ }
+ return Info{
+ system: numSysData[system],
+ symIndex: pSymIndex,
+ }
+}
+
+// InfoFromTag returns an Info for the given language tag.
+func InfoFromTag(t language.Tag) Info {
+ return InfoFromLangID(tagToID(t), t.TypeForKey("nu"))
+}
+
+// IsDecimal reports if the numbering system can convert decimal to native
+// symbols one-to-one.
+func (n Info) IsDecimal() bool {
+ return int(n.system.id) < len(numSysData)
+}
+
+// WriteDigit writes the UTF-8 sequence for n corresponding to the given ASCII
+// digit to dst and reports the number of bytes written. dst must be large
+// enough to hold the rune (can be up to utf8.UTFMax bytes).
+func (n Info) WriteDigit(dst []byte, asciiDigit rune) int {
+ copy(dst, n.system.zero[:n.system.digitSize])
+ dst[n.system.digitSize-1] += byte(asciiDigit - '0')
+ return int(n.system.digitSize)
+}
+
+// AppendDigit appends the UTF-8 sequence for n corresponding to the given digit
+// to dst and reports the number of bytes written. dst must be large enough to
+// hold the rune (can be up to utf8.UTFMax bytes).
+func (n Info) AppendDigit(dst []byte, digit byte) []byte {
+ dst = append(dst, n.system.zero[:n.system.digitSize]...)
+ dst[len(dst)-1] += digit
+ return dst
+}
+
+// Digit returns the digit for the numbering system for the corresponding ASCII
+// value. For example, ni.Digit('3') could return '三'. Note that the argument
+// is the rune constant '3', which equals 51, not the integer constant 3.
+func (n Info) Digit(asciiDigit rune) rune {
+ var x [utf8.UTFMax]byte
+ n.WriteDigit(x[:], asciiDigit)
+ r, _ := utf8.DecodeRune(x[:])
+ return r
+}
+
+// Symbol returns the string for the given symbol type.
+func (n Info) Symbol(t SymbolType) string {
+ return symData.Elem(int(symIndex[n.symIndex][t]))
+}
+
+func formatForLang(t language.Tag, index []byte) *Pattern {
+ return &formats[index[tagToID(t)]]
+}
+
+func tagToID(t language.Tag) compact.ID {
+ id, _ := compact.RegionalID(compact.Tag(t))
+ return id
+}
diff --git a/vendor/golang.org/x/text/internal/number/pattern.go b/vendor/golang.org/x/text/internal/number/pattern.go
new file mode 100644
index 0000000000..06e59559a9
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/pattern.go
@@ -0,0 +1,485 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package number
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// This file contains a parser for the CLDR number patterns as described in
+// https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns.
+//
+// The following BNF is derived from this standard.
+//
+// pattern := subpattern (';' subpattern)?
+// subpattern := affix? number exponent? affix?
+// number := decimal | sigDigits
+// decimal := '#'* '0'* ('.' fraction)? | '#' | '0'
+// fraction := '0'* '#'*
+// sigDigits := '#'* '@' '@'* '#'*
+// exponent := 'E' '+'? '0'* '0'
+// padSpec := '*' \L
+//
+// Notes:
+// - An affix pattern may contain any runes, but runes with special meaning
+// should be escaped.
+// - Sequences of digits, '#', and '@' in decimal and sigDigits may have
+// interstitial commas.
+
+// TODO: replace special characters in affixes (-, +, ¤) with control codes.
+
+// Pattern holds information for formatting numbers. It is designed to hold
+// information from CLDR number patterns.
+//
+// This pattern is precompiled for all patterns for all languages. Even though
+// the number of patterns is not very large, we want to keep this small.
+//
+// This type is only intended for internal use.
+type Pattern struct {
+ RoundingContext
+
+ Affix string // includes prefix and suffix. First byte is prefix length.
+ Offset uint16 // Offset into Affix for prefix and suffix
+ NegOffset uint16 // Offset into Affix for negative prefix and suffix or 0.
+ PadRune rune
+ FormatWidth uint16
+
+ GroupingSize [2]uint8
+ Flags PatternFlag
+}
+
+// A RoundingContext indicates how a number should be converted to digits.
+// It contains all information needed to determine the "visible digits" as
+// required by the pluralization rules.
+type RoundingContext struct {
+ // TODO: unify these two fields so that there is a more unambiguous meaning
+ // of how precision is handled.
+ MaxSignificantDigits int16 // -1 is unlimited
+ MaxFractionDigits int16 // -1 is unlimited
+
+ Increment uint32
+ IncrementScale uint8 // May differ from printed scale.
+
+ Mode RoundingMode
+
+ DigitShift uint8 // Number of decimals to shift. Used for % and ‰.
+
+ // Number of digits.
+ MinIntegerDigits uint8
+
+ MaxIntegerDigits uint8
+ MinFractionDigits uint8
+ MinSignificantDigits uint8
+
+ MinExponentDigits uint8
+}
+
+// RoundSignificantDigits returns the number of significant digits an
+// implementation of Convert may round to or n < 0 if there is no maximum or
+// a maximum is not recommended.
+func (r *RoundingContext) RoundSignificantDigits() (n int) {
+ if r.MaxFractionDigits == 0 && r.MaxSignificantDigits > 0 {
+ return int(r.MaxSignificantDigits)
+ } else if r.isScientific() && r.MaxIntegerDigits == 1 {
+ if r.MaxSignificantDigits == 0 ||
+ int(r.MaxFractionDigits+1) == int(r.MaxSignificantDigits) {
+ // Note: don't add DigitShift: it is only used for decimals.
+ return int(r.MaxFractionDigits) + 1
+ }
+ }
+ return -1
+}
+
+// RoundFractionDigits returns the number of fraction digits an implementation
+// of Convert may round to or n < 0 if there is no maximum or a maximum is not
+// recommended.
+func (r *RoundingContext) RoundFractionDigits() (n int) {
+ if r.MinExponentDigits == 0 &&
+ r.MaxSignificantDigits == 0 &&
+ r.MaxFractionDigits >= 0 {
+ return int(r.MaxFractionDigits) + int(r.DigitShift)
+ }
+ return -1
+}
+
+// SetScale fixes the RoundingContext to a fixed number of fraction digits.
+func (r *RoundingContext) SetScale(scale int) {
+ r.MinFractionDigits = uint8(scale)
+ r.MaxFractionDigits = int16(scale)
+}
+
+func (r *RoundingContext) SetPrecision(prec int) {
+ r.MaxSignificantDigits = int16(prec)
+}
+
+func (r *RoundingContext) isScientific() bool {
+ return r.MinExponentDigits > 0
+}
+
+func (f *Pattern) needsSep(pos int) bool {
+ p := pos - 1
+ size := int(f.GroupingSize[0])
+ if size == 0 || p == 0 {
+ return false
+ }
+ if p == size {
+ return true
+ }
+ if p -= size; p < 0 {
+ return false
+ }
+ // TODO: make second groupingsize the same as first if 0 so that we can
+ // avoid this check.
+ if x := int(f.GroupingSize[1]); x != 0 {
+ size = x
+ }
+ return p%size == 0
+}
+
+// A PatternFlag is a bit mask for the flag field of a Pattern.
+type PatternFlag uint8
+
+const (
+ AlwaysSign PatternFlag = 1 << iota
+ ElideSign // Use space instead of plus sign. AlwaysSign must be true.
+ AlwaysExpSign
+ AlwaysDecimalSeparator
+ ParenthesisForNegative // Common pattern. Saves space.
+
+ PadAfterNumber
+ PadAfterAffix
+
+ PadBeforePrefix = 0 // Default
+ PadAfterPrefix = PadAfterAffix
+ PadBeforeSuffix = PadAfterNumber
+ PadAfterSuffix = PadAfterNumber | PadAfterAffix
+ PadMask = PadAfterNumber | PadAfterAffix
+)
+
+type parser struct {
+ *Pattern
+
+ leadingSharps int
+
+ pos int
+ err error
+ doNotTerminate bool
+ groupingCount uint
+ hasGroup bool
+ buf []byte
+}
+
+func (p *parser) setError(err error) {
+ if p.err == nil {
+ p.err = err
+ }
+}
+
+func (p *parser) updateGrouping() {
+ if p.hasGroup &&
+ 0 < p.groupingCount && p.groupingCount < 255 {
+ p.GroupingSize[1] = p.GroupingSize[0]
+ p.GroupingSize[0] = uint8(p.groupingCount)
+ }
+ p.groupingCount = 0
+ p.hasGroup = true
+}
+
+var (
+ // TODO: more sensible and localizable error messages.
+ errMultiplePadSpecifiers = errors.New("format: pattern has multiple pad specifiers")
+ errInvalidPadSpecifier = errors.New("format: invalid pad specifier")
+ errInvalidQuote = errors.New("format: invalid quote")
+ errAffixTooLarge = errors.New("format: prefix or suffix exceeds maximum UTF-8 length of 256 bytes")
+ errDuplicatePercentSign = errors.New("format: duplicate percent sign")
+ errDuplicatePermilleSign = errors.New("format: duplicate permille sign")
+ errUnexpectedEnd = errors.New("format: unexpected end of pattern")
+)
+
+// ParsePattern extracts formatting information from a CLDR number pattern.
+//
+// See https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns.
+func ParsePattern(s string) (f *Pattern, err error) {
+ p := parser{Pattern: &Pattern{}}
+
+ s = p.parseSubPattern(s)
+
+ if s != "" {
+ // Parse negative sub pattern.
+ if s[0] != ';' {
+ p.setError(errors.New("format: error parsing first sub pattern"))
+ return nil, p.err
+ }
+ neg := parser{Pattern: &Pattern{}} // just for extracting the affixes.
+ s = neg.parseSubPattern(s[len(";"):])
+ p.NegOffset = uint16(len(p.buf))
+ p.buf = append(p.buf, neg.buf...)
+ }
+ if s != "" {
+ p.setError(errors.New("format: spurious characters at end of pattern"))
+ }
+ if p.err != nil {
+ return nil, p.err
+ }
+ if affix := string(p.buf); affix == "\x00\x00" || affix == "\x00\x00\x00\x00" {
+ // No prefix or suffixes.
+ p.NegOffset = 0
+ } else {
+ p.Affix = affix
+ }
+ if p.Increment == 0 {
+ p.IncrementScale = 0
+ }
+ return p.Pattern, nil
+}
+
+func (p *parser) parseSubPattern(s string) string {
+ s = p.parsePad(s, PadBeforePrefix)
+ s = p.parseAffix(s)
+ s = p.parsePad(s, PadAfterPrefix)
+
+ s = p.parse(p.number, s)
+ p.updateGrouping()
+
+ s = p.parsePad(s, PadBeforeSuffix)
+ s = p.parseAffix(s)
+ s = p.parsePad(s, PadAfterSuffix)
+ return s
+}
+
+func (p *parser) parsePad(s string, f PatternFlag) (tail string) {
+ if len(s) >= 2 && s[0] == '*' {
+ r, sz := utf8.DecodeRuneInString(s[1:])
+ if p.PadRune != 0 {
+ p.err = errMultiplePadSpecifiers
+ } else {
+ p.Flags |= f
+ p.PadRune = r
+ }
+ return s[1+sz:]
+ }
+ return s
+}
+
+func (p *parser) parseAffix(s string) string {
+ x := len(p.buf)
+ p.buf = append(p.buf, 0) // placeholder for affix length
+
+ s = p.parse(p.affix, s)
+
+ n := len(p.buf) - x - 1
+ if n > 0xFF {
+ p.setError(errAffixTooLarge)
+ }
+ p.buf[x] = uint8(n)
+ return s
+}
+
+// state implements a state transition. It returns the new state. A state
+// function may set an error on the parser or may simply return on an incorrect
+// token and let the next phase fail.
+type state func(r rune) state
+
+// parse repeatedly applies a state function on the given string until a
+// termination condition is reached.
+func (p *parser) parse(fn state, s string) (tail string) {
+ for i, r := range s {
+ p.doNotTerminate = false
+ if fn = fn(r); fn == nil || p.err != nil {
+ return s[i:]
+ }
+ p.FormatWidth++
+ }
+ if p.doNotTerminate {
+ p.setError(errUnexpectedEnd)
+ }
+ return ""
+}
+
+func (p *parser) affix(r rune) state {
+ switch r {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ '#', '@', '.', '*', ',', ';':
+ return nil
+ case '\'':
+ p.FormatWidth--
+ return p.escapeFirst
+ case '%':
+ if p.DigitShift != 0 {
+ p.setError(errDuplicatePercentSign)
+ }
+ p.DigitShift = 2
+ case '\u2030': // ‰ Per mille
+ if p.DigitShift != 0 {
+ p.setError(errDuplicatePermilleSign)
+ }
+ p.DigitShift = 3
+ // TODO: handle currency somehow: ¤, ¤¤, ¤¤¤, ¤¤¤¤
+ }
+ p.buf = append(p.buf, string(r)...)
+ return p.affix
+}
+
+func (p *parser) escapeFirst(r rune) state {
+ switch r {
+ case '\'':
+ p.buf = append(p.buf, "\\'"...)
+ return p.affix
+ default:
+ p.buf = append(p.buf, '\'')
+ p.buf = append(p.buf, string(r)...)
+ }
+ return p.escape
+}
+
+func (p *parser) escape(r rune) state {
+ switch r {
+ case '\'':
+ p.FormatWidth--
+ p.buf = append(p.buf, '\'')
+ return p.affix
+ default:
+ p.buf = append(p.buf, string(r)...)
+ }
+ return p.escape
+}
+
+// number parses a number. The BNF says the integer part should always have
+// a '0', but that does not appear to be the case according to the rest of the
+// documentation. We will allow having only '#' numbers.
+func (p *parser) number(r rune) state {
+ switch r {
+ case '#':
+ p.groupingCount++
+ p.leadingSharps++
+ case '@':
+ p.groupingCount++
+ p.leadingSharps = 0
+ p.MaxFractionDigits = -1
+ return p.sigDigits(r)
+ case ',':
+ if p.leadingSharps == 0 { // no leading commas
+ return nil
+ }
+ p.updateGrouping()
+ case 'E':
+ p.MaxIntegerDigits = uint8(p.leadingSharps)
+ return p.exponent
+ case '.': // allow ".##" etc.
+ p.updateGrouping()
+ return p.fraction
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ return p.integer(r)
+ default:
+ return nil
+ }
+ return p.number
+}
+
+func (p *parser) integer(r rune) state {
+ if !('0' <= r && r <= '9') {
+ var next state
+ switch r {
+ case 'E':
+ if p.leadingSharps > 0 {
+ p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits
+ }
+ next = p.exponent
+ case '.':
+ next = p.fraction
+ case ',':
+ next = p.integer
+ }
+ p.updateGrouping()
+ return next
+ }
+ p.Increment = p.Increment*10 + uint32(r-'0')
+ p.groupingCount++
+ p.MinIntegerDigits++
+ return p.integer
+}
+
+func (p *parser) sigDigits(r rune) state {
+ switch r {
+ case '@':
+ p.groupingCount++
+ p.MaxSignificantDigits++
+ p.MinSignificantDigits++
+ case '#':
+ return p.sigDigitsFinal(r)
+ case 'E':
+ p.updateGrouping()
+ return p.normalizeSigDigitsWithExponent()
+ default:
+ p.updateGrouping()
+ return nil
+ }
+ return p.sigDigits
+}
+
+func (p *parser) sigDigitsFinal(r rune) state {
+ switch r {
+ case '#':
+ p.groupingCount++
+ p.MaxSignificantDigits++
+ case 'E':
+ p.updateGrouping()
+ return p.normalizeSigDigitsWithExponent()
+ default:
+ p.updateGrouping()
+ return nil
+ }
+ return p.sigDigitsFinal
+}
+
+func (p *parser) normalizeSigDigitsWithExponent() state {
+ p.MinIntegerDigits, p.MaxIntegerDigits = 1, 1
+ p.MinFractionDigits = p.MinSignificantDigits - 1
+ p.MaxFractionDigits = p.MaxSignificantDigits - 1
+ p.MinSignificantDigits, p.MaxSignificantDigits = 0, 0
+ return p.exponent
+}
+
+func (p *parser) fraction(r rune) state {
+ switch r {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ p.Increment = p.Increment*10 + uint32(r-'0')
+ p.IncrementScale++
+ p.MinFractionDigits++
+ p.MaxFractionDigits++
+ case '#':
+ p.MaxFractionDigits++
+ case 'E':
+ if p.leadingSharps > 0 {
+ p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits
+ }
+ return p.exponent
+ default:
+ return nil
+ }
+ return p.fraction
+}
+
+func (p *parser) exponent(r rune) state {
+ switch r {
+ case '+':
+ // Set mode and check it wasn't already set.
+ if p.Flags&AlwaysExpSign != 0 || p.MinExponentDigits > 0 {
+ break
+ }
+ p.Flags |= AlwaysExpSign
+ p.doNotTerminate = true
+ return p.exponent
+ case '0':
+ p.MinExponentDigits++
+ return p.exponent
+ }
+ // termination condition
+ if p.MinExponentDigits == 0 {
+ p.setError(errors.New("format: need at least one digit"))
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/text/internal/number/roundingmode_string.go b/vendor/golang.org/x/text/internal/number/roundingmode_string.go
new file mode 100644
index 0000000000..bcc22471db
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/roundingmode_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -type RoundingMode"; DO NOT EDIT.
+
+package number
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ToNearestEven-0]
+ _ = x[ToNearestZero-1]
+ _ = x[ToNearestAway-2]
+ _ = x[ToPositiveInf-3]
+ _ = x[ToNegativeInf-4]
+ _ = x[ToZero-5]
+ _ = x[AwayFromZero-6]
+ _ = x[numModes-7]
+}
+
+const _RoundingMode_name = "ToNearestEvenToNearestZeroToNearestAwayToPositiveInfToNegativeInfToZeroAwayFromZeronumModes"
+
+var _RoundingMode_index = [...]uint8{0, 13, 26, 39, 52, 65, 71, 83, 91}
+
+func (i RoundingMode) String() string {
+ if i >= RoundingMode(len(_RoundingMode_index)-1) {
+ return "RoundingMode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]]
+}
diff --git a/vendor/golang.org/x/text/internal/number/tables.go b/vendor/golang.org/x/text/internal/number/tables.go
new file mode 100644
index 0000000000..8efce81b56
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/number/tables.go
@@ -0,0 +1,1219 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package number
+
+import "golang.org/x/text/internal/stringset"
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32"
+
+var numSysData = []systemData{ // 59 elements
+ 0: {id: 0x0, digitSize: 0x1, zero: [4]uint8{0x30, 0x0, 0x0, 0x0}},
+ 1: {id: 0x1, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9e, 0xa5, 0x90}},
+ 2: {id: 0x2, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9c, 0xb0}},
+ 3: {id: 0x3, digitSize: 0x2, zero: [4]uint8{0xd9, 0xa0, 0x0, 0x0}},
+ 4: {id: 0x4, digitSize: 0x2, zero: [4]uint8{0xdb, 0xb0, 0x0, 0x0}},
+ 5: {id: 0x5, digitSize: 0x3, zero: [4]uint8{0xe1, 0xad, 0x90, 0x0}},
+ 6: {id: 0x6, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa7, 0xa6, 0x0}},
+ 7: {id: 0x7, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb1, 0x90}},
+ 8: {id: 0x8, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x81, 0xa6}},
+ 9: {id: 0x9, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x84, 0xb6}},
+ 10: {id: 0xa, digitSize: 0x3, zero: [4]uint8{0xea, 0xa9, 0x90, 0x0}},
+ 11: {id: 0xb, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa5, 0xa6, 0x0}},
+ 12: {id: 0xc, digitSize: 0x3, zero: [4]uint8{0xef, 0xbc, 0x90, 0x0}},
+ 13: {id: 0xd, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb5, 0x90}},
+ 14: {id: 0xe, digitSize: 0x3, zero: [4]uint8{0xe0, 0xab, 0xa6, 0x0}},
+ 15: {id: 0xf, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa9, 0xa6, 0x0}},
+ 16: {id: 0x10, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xad, 0x90}},
+ 17: {id: 0x11, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0x90, 0x0}},
+ 18: {id: 0x12, digitSize: 0x3, zero: [4]uint8{0xea, 0xa4, 0x80, 0x0}},
+ 19: {id: 0x13, digitSize: 0x3, zero: [4]uint8{0xe1, 0x9f, 0xa0, 0x0}},
+ 20: {id: 0x14, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb3, 0xa6, 0x0}},
+ 21: {id: 0x15, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x80, 0x0}},
+ 22: {id: 0x16, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x90, 0x0}},
+ 23: {id: 0x17, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbb, 0x90, 0x0}},
+ 24: {id: 0x18, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x80, 0x0}},
+ 25: {id: 0x19, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa5, 0x86, 0x0}},
+ 26: {id: 0x1a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x8e}},
+ 27: {id: 0x1b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x98}},
+ 28: {id: 0x1c, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xb6}},
+ 29: {id: 0x1d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xac}},
+ 30: {id: 0x1e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xa2}},
+ 31: {id: 0x1f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb5, 0xa6, 0x0}},
+ 32: {id: 0x20, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x99, 0x90}},
+ 33: {id: 0x21, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa0, 0x90, 0x0}},
+ 34: {id: 0x22, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xa9, 0xa0}},
+ 35: {id: 0x23, digitSize: 0x3, zero: [4]uint8{0xea, 0xaf, 0xb0, 0x0}},
+ 36: {id: 0x24, digitSize: 0x3, zero: [4]uint8{0xe1, 0x81, 0x80, 0x0}},
+ 37: {id: 0x25, digitSize: 0x3, zero: [4]uint8{0xe1, 0x82, 0x90, 0x0}},
+ 38: {id: 0x26, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0xb0, 0x0}},
+ 39: {id: 0x27, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x91, 0x90}},
+ 40: {id: 0x28, digitSize: 0x2, zero: [4]uint8{0xdf, 0x80, 0x0, 0x0}},
+ 41: {id: 0x29, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x90, 0x0}},
+ 42: {id: 0x2a, digitSize: 0x3, zero: [4]uint8{0xe0, 0xad, 0xa6, 0x0}},
+ 43: {id: 0x2b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x90, 0x92, 0xa0}},
+ 44: {id: 0x2c, digitSize: 0x3, zero: [4]uint8{0xea, 0xa3, 0x90, 0x0}},
+ 45: {id: 0x2d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x87, 0x90}},
+ 46: {id: 0x2e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x8b, 0xb0}},
+ 47: {id: 0x2f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb7, 0xa6, 0x0}},
+ 48: {id: 0x30, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x83, 0xb0}},
+ 49: {id: 0x31, digitSize: 0x3, zero: [4]uint8{0xe1, 0xae, 0xb0, 0x0}},
+ 50: {id: 0x32, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9b, 0x80}},
+ 51: {id: 0x33, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa7, 0x90, 0x0}},
+ 52: {id: 0x34, digitSize: 0x3, zero: [4]uint8{0xe0, 0xaf, 0xa6, 0x0}},
+ 53: {id: 0x35, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb1, 0xa6, 0x0}},
+ 54: {id: 0x36, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb9, 0x90, 0x0}},
+ 55: {id: 0x37, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbc, 0xa0, 0x0}},
+ 56: {id: 0x38, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x93, 0x90}},
+ 57: {id: 0x39, digitSize: 0x3, zero: [4]uint8{0xea, 0x98, 0xa0, 0x0}},
+ 58: {id: 0x3a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xa3, 0xa0}},
+} // Size: 378 bytes
+
+const (
+ numAdlm = 0x1
+ numAhom = 0x2
+ numArab = 0x3
+ numArabext = 0x4
+ numArmn = 0x3b
+ numArmnlow = 0x3c
+ numBali = 0x5
+ numBeng = 0x6
+ numBhks = 0x7
+ numBrah = 0x8
+ numCakm = 0x9
+ numCham = 0xa
+ numCyrl = 0x3d
+ numDeva = 0xb
+ numEthi = 0x3e
+ numFullwide = 0xc
+ numGeor = 0x3f
+ numGonm = 0xd
+ numGrek = 0x40
+ numGreklow = 0x41
+ numGujr = 0xe
+ numGuru = 0xf
+ numHanidays = 0x42
+ numHanidec = 0x43
+ numHans = 0x44
+ numHansfin = 0x45
+ numHant = 0x46
+ numHantfin = 0x47
+ numHebr = 0x48
+ numHmng = 0x10
+ numJava = 0x11
+ numJpan = 0x49
+ numJpanfin = 0x4a
+ numKali = 0x12
+ numKhmr = 0x13
+ numKnda = 0x14
+ numLana = 0x15
+ numLanatham = 0x16
+ numLaoo = 0x17
+ numLatn = 0x0
+ numLepc = 0x18
+ numLimb = 0x19
+ numMathbold = 0x1a
+ numMathdbl = 0x1b
+ numMathmono = 0x1c
+ numMathsanb = 0x1d
+ numMathsans = 0x1e
+ numMlym = 0x1f
+ numModi = 0x20
+ numMong = 0x21
+ numMroo = 0x22
+ numMtei = 0x23
+ numMymr = 0x24
+ numMymrshan = 0x25
+ numMymrtlng = 0x26
+ numNewa = 0x27
+ numNkoo = 0x28
+ numOlck = 0x29
+ numOrya = 0x2a
+ numOsma = 0x2b
+ numRoman = 0x4b
+ numRomanlow = 0x4c
+ numSaur = 0x2c
+ numShrd = 0x2d
+ numSind = 0x2e
+ numSinh = 0x2f
+ numSora = 0x30
+ numSund = 0x31
+ numTakr = 0x32
+ numTalu = 0x33
+ numTaml = 0x4d
+ numTamldec = 0x34
+ numTelu = 0x35
+ numThai = 0x36
+ numTibt = 0x37
+ numTirh = 0x38
+ numVaii = 0x39
+ numWara = 0x3a
+ numNumberSystems
+)
+
+var systemMap = map[string]system{
+ "adlm": numAdlm,
+ "ahom": numAhom,
+ "arab": numArab,
+ "arabext": numArabext,
+ "armn": numArmn,
+ "armnlow": numArmnlow,
+ "bali": numBali,
+ "beng": numBeng,
+ "bhks": numBhks,
+ "brah": numBrah,
+ "cakm": numCakm,
+ "cham": numCham,
+ "cyrl": numCyrl,
+ "deva": numDeva,
+ "ethi": numEthi,
+ "fullwide": numFullwide,
+ "geor": numGeor,
+ "gonm": numGonm,
+ "grek": numGrek,
+ "greklow": numGreklow,
+ "gujr": numGujr,
+ "guru": numGuru,
+ "hanidays": numHanidays,
+ "hanidec": numHanidec,
+ "hans": numHans,
+ "hansfin": numHansfin,
+ "hant": numHant,
+ "hantfin": numHantfin,
+ "hebr": numHebr,
+ "hmng": numHmng,
+ "java": numJava,
+ "jpan": numJpan,
+ "jpanfin": numJpanfin,
+ "kali": numKali,
+ "khmr": numKhmr,
+ "knda": numKnda,
+ "lana": numLana,
+ "lanatham": numLanatham,
+ "laoo": numLaoo,
+ "latn": numLatn,
+ "lepc": numLepc,
+ "limb": numLimb,
+ "mathbold": numMathbold,
+ "mathdbl": numMathdbl,
+ "mathmono": numMathmono,
+ "mathsanb": numMathsanb,
+ "mathsans": numMathsans,
+ "mlym": numMlym,
+ "modi": numModi,
+ "mong": numMong,
+ "mroo": numMroo,
+ "mtei": numMtei,
+ "mymr": numMymr,
+ "mymrshan": numMymrshan,
+ "mymrtlng": numMymrtlng,
+ "newa": numNewa,
+ "nkoo": numNkoo,
+ "olck": numOlck,
+ "orya": numOrya,
+ "osma": numOsma,
+ "roman": numRoman,
+ "romanlow": numRomanlow,
+ "saur": numSaur,
+ "shrd": numShrd,
+ "sind": numSind,
+ "sinh": numSinh,
+ "sora": numSora,
+ "sund": numSund,
+ "takr": numTakr,
+ "talu": numTalu,
+ "taml": numTaml,
+ "tamldec": numTamldec,
+ "telu": numTelu,
+ "thai": numThai,
+ "tibt": numTibt,
+ "tirh": numTirh,
+ "vaii": numVaii,
+ "wara": numWara,
+}
+
+var symIndex = [][12]uint8{ // 81 elements
+ 0: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 1: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 2: [12]uint8{0x0, 0x1, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb},
+ 3: [12]uint8{0x1, 0x0, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb},
+ 4: [12]uint8{0x0, 0x1, 0x2, 0x11, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb},
+ 5: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x12, 0xb},
+ 6: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 7: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x13, 0xb},
+ 8: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 9: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0},
+ 10: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb},
+ 11: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb},
+ 12: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb},
+ 13: [12]uint8{0x0, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 14: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x16, 0xb},
+ 15: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 16: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0x0},
+ 17: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 18: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0},
+ 19: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 20: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x19, 0x1a, 0xa, 0xb},
+ 21: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 22: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 23: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 24: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0x1d, 0xb},
+ 25: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0x1e, 0x0},
+ 26: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 27: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 28: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x1f, 0xb},
+ 29: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 30: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x20, 0xb},
+ 31: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x21, 0x7, 0x8, 0x9, 0x22, 0xb},
+ 32: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x23, 0xb},
+ 33: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x14, 0x8, 0x9, 0x24, 0xb},
+ 34: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0x24, 0xb},
+ 35: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x25, 0xb},
+ 36: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x26, 0xb},
+ 37: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x27, 0xb},
+ 38: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb},
+ 39: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x29, 0xb},
+ 40: [12]uint8{0x1, 0x0, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 41: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2a, 0xb},
+ 42: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2b, 0xb},
+ 43: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x2c, 0x14, 0x8, 0x9, 0x24, 0xb},
+ 44: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0},
+ 45: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 46: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 47: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2d, 0x0},
+ 48: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2e, 0xb},
+ 49: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2f, 0xb},
+ 50: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x30, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 51: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x31, 0xb},
+ 52: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x32, 0xb},
+ 53: [12]uint8{0x1, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb},
+ 54: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x33, 0xb},
+ 55: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x34, 0xb},
+ 56: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 57: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0x3d, 0xb},
+ 58: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 59: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 60: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x40, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 61: [12]uint8{0x35, 0x36, 0x37, 0x41, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 62: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb},
+ 63: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0x0},
+ 64: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x43, 0x7, 0x44, 0x9, 0x24, 0xb},
+ 65: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x5, 0x3b, 0x7, 0x3c, 0x9, 0x33, 0xb},
+ 66: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x46, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35},
+ 67: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0x1d, 0xb},
+ 68: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35},
+ 69: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35},
+ 70: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x0},
+ 71: [12]uint8{0x35, 0x1, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35},
+ 72: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0x24, 0xb},
+ 73: [12]uint8{0x35, 0x36, 0x2, 0x3, 0x45, 0x46, 0x43, 0x7, 0x8, 0x9, 0xa, 0x35},
+ 74: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x31, 0x35},
+ 75: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x32, 0x35},
+ 76: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x48, 0x46, 0x43, 0x7, 0x3c, 0x9, 0x33, 0x35},
+ 77: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x49},
+ 78: [12]uint8{0x0, 0x1, 0x4a, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb},
+ 79: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x4b, 0xb},
+ 80: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x4c, 0x4d, 0xb},
+} // Size: 996 bytes
+
+var symData = stringset.Set{
+ Data: "" + // Size: 599 bytes
+ ".,;%+-E׉∞NaN:\u00a0\u200e%\u200e\u200e+\u200e-ليس\u00a0رقمًا٪NDТерхьаш" +
+ "\u00a0дац·’mnne×10^0/00INF−\u200e−ناعددepälukuՈչԹარ\u00a0არის\u00a0რიცხვ" +
+ "იZMdMсан\u00a0емес¤¤¤сан\u00a0эмесບໍ່\u200bແມ່ນ\u200bໂຕ\u200bເລກNSဂဏန်" +
+ "းမဟုတ်သောННне\u00a0числочыыһыла\u00a0буотах·10^epilohosan\u00a0dälTFЕs" +
+ "on\u00a0emasҳақиқий\u00a0сон\u00a0эмас非數值非数值٫٬؛٪\u061c\u061c+\u061c-اس؉ل" +
+ "يس\u00a0رقم\u200f+\u200f-\u200f−٪\u200f\u061c−×۱۰^؉\u200f\u200e+\u200e" +
+ "\u200e-\u200e\u200e−\u200e+\u200e:၊ཨང་མེན་གྲངས་མེདཨང་མད",
+ Index: []uint16{ // 79 elements
+ // Entry 0 - 3F
+ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
+ 0x0009, 0x000c, 0x000f, 0x0012, 0x0013, 0x0015, 0x001c, 0x0020,
+ 0x0024, 0x0036, 0x0038, 0x003a, 0x0050, 0x0052, 0x0055, 0x0058,
+ 0x0059, 0x005e, 0x0062, 0x0065, 0x0068, 0x006e, 0x0078, 0x0080,
+ 0x0086, 0x00ae, 0x00af, 0x00b2, 0x00c2, 0x00c8, 0x00d8, 0x0105,
+ 0x0107, 0x012e, 0x0132, 0x0142, 0x015e, 0x0163, 0x016a, 0x0173,
+ 0x0175, 0x0177, 0x0180, 0x01a0, 0x01a9, 0x01b2, 0x01b4, 0x01b6,
+ 0x01b8, 0x01bc, 0x01bf, 0x01c2, 0x01c6, 0x01c8, 0x01d6, 0x01da,
+ // Entry 40 - 7F
+ 0x01de, 0x01e4, 0x01e9, 0x01ee, 0x01f5, 0x01fa, 0x0201, 0x0208,
+ 0x0211, 0x0215, 0x0218, 0x021b, 0x0230, 0x0248, 0x0257,
+ },
+} // Size: 797 bytes
+
+// langToDefaults maps a compact language index to the default numbering system
+// and default symbol set
+var langToDefaults = [775]symOffset{
+ // Entry 0 - 3F
+ 0x8000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x8003, 0x0002, 0x0002, 0x0002, 0x0002, 0x0003,
+ 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002,
+ 0x0003, 0x0003, 0x0003, 0x0003, 0x0002, 0x0002, 0x0002, 0x0004,
+ 0x0002, 0x0004, 0x0002, 0x0002, 0x0002, 0x0003, 0x0002, 0x0000,
+ 0x8005, 0x0000, 0x0000, 0x0000, 0x8006, 0x0005, 0x0006, 0x0006,
+ 0x0006, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000,
+ // Entry 40 - 7F
+ 0x8009, 0x0000, 0x0000, 0x800a, 0x0000, 0x0000, 0x800c, 0x0001,
+ 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006,
+ 0x0006, 0x0006, 0x0006, 0x0006, 0x800e, 0x0000, 0x0000, 0x0007,
+ 0x0007, 0x0000, 0x0000, 0x0000, 0x0000, 0x800f, 0x0008, 0x0008,
+ 0x8011, 0x0001, 0x0001, 0x0001, 0x803c, 0x0000, 0x0009, 0x0009,
+ 0x0009, 0x0000, 0x0000, 0x000a, 0x000b, 0x000a, 0x000c, 0x000a,
+ 0x000a, 0x000c, 0x000a, 0x000d, 0x000d, 0x000a, 0x000a, 0x0001,
+ 0x0001, 0x0000, 0x0001, 0x0001, 0x803f, 0x0000, 0x0000, 0x0000,
+ // Entry 80 - BF
+ 0x000e, 0x000e, 0x000e, 0x000f, 0x000f, 0x000f, 0x0000, 0x0000,
+ 0x0006, 0x0000, 0x0000, 0x0000, 0x000a, 0x0010, 0x0000, 0x0006,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x000a,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x0000, 0x0009, 0x0000,
+ 0x0000, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ // Entry C0 - FF
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0013, 0x0000,
+ 0x0000, 0x000f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000, 0x0015,
+ 0x0015, 0x0006, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, 0x0006,
+ 0x0006, 0x0001, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006,
+ // Entry 100 - 13F
+ 0x0000, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006,
+ 0x0000, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0016, 0x0016,
+ 0x0017, 0x0017, 0x0001, 0x0001, 0x8041, 0x0018, 0x0018, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0019, 0x0019, 0x0000, 0x0000,
+ 0x0017, 0x0017, 0x0017, 0x8044, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001,
+ // Entry 140 - 17F
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x0001, 0x0001, 0x0006, 0x0006, 0x0006, 0x0006, 0x0000, 0x0000,
+ 0x8047, 0x0000, 0x0006, 0x0006, 0x001a, 0x001a, 0x001a, 0x001a,
+ 0x804a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x804c, 0x001b, 0x0000,
+ 0x0000, 0x0006, 0x0006, 0x0006, 0x000a, 0x000a, 0x0001, 0x0001,
+ 0x001c, 0x001c, 0x0009, 0x0009, 0x804f, 0x0000, 0x0000, 0x0000,
+ // Entry 180 - 1BF
+ 0x0000, 0x0000, 0x8052, 0x0006, 0x0006, 0x001d, 0x0006, 0x0006,
+ 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001e, 0x001e, 0x001f,
+ 0x001f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001,
+ 0x0001, 0x000d, 0x000d, 0x0000, 0x0000, 0x0020, 0x0020, 0x0006,
+ 0x0006, 0x0021, 0x0021, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000,
+ 0x0000, 0x8054, 0x0000, 0x0000, 0x0000, 0x0000, 0x8056, 0x001b,
+ 0x0000, 0x0000, 0x0001, 0x0001, 0x0022, 0x0022, 0x0000, 0x0000,
+ // Entry 1C0 - 1FF
+ 0x0000, 0x0023, 0x0023, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006,
+ 0x0024, 0x0024, 0x8058, 0x0000, 0x0000, 0x0016, 0x0016, 0x0006,
+ 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0025, 0x0025, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x000d, 0x000d, 0x0000, 0x0000,
+ 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x805a, 0x0000, 0x0000, 0x0006, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x805b, 0x0026, 0x805d,
+ // Entry 200 - 23F
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x805e, 0x0015, 0x0015, 0x0000,
+ 0x0000, 0x0006, 0x0006, 0x0006, 0x8061, 0x0000, 0x0000, 0x8062,
+ 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0001,
+ 0x0001, 0x0015, 0x0015, 0x0006, 0x0006, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0027, 0x0027, 0x0027, 0x8065, 0x8067,
+ 0x001b, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x8069, 0x0028, 0x0006, 0x0001, 0x0006, 0x0001, 0x0001, 0x0001,
+ // Entry 240 - 27F
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000,
+ 0x0006, 0x0000, 0x0000, 0x001a, 0x001a, 0x0006, 0x0006, 0x0006,
+ 0x0006, 0x0006, 0x0000, 0x0000, 0x0029, 0x0029, 0x0029, 0x0029,
+ 0x0029, 0x0029, 0x0029, 0x0006, 0x0006, 0x0000, 0x0000, 0x002a,
+ 0x002a, 0x0000, 0x0000, 0x0000, 0x0000, 0x806b, 0x0000, 0x0000,
+ 0x002b, 0x002b, 0x002b, 0x002b, 0x0006, 0x0006, 0x000d, 0x000d,
+ 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001,
+ 0x002c, 0x002c, 0x002d, 0x002d, 0x002e, 0x002e, 0x0000, 0x0000,
+ // Entry 280 - 2BF
+ 0x0000, 0x002f, 0x002f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0006,
+ 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006,
+ 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, 0x806d, 0x0022, 0x0022,
+ 0x0022, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0030, 0x0030, 0x0000, 0x0000, 0x8071, 0x0031, 0x0006,
+ // Entry 2C0 - 2FF
+ 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x000d, 0x000d, 0x0001,
+ 0x0001, 0x0000, 0x0000, 0x0032, 0x0032, 0x8074, 0x8076, 0x001b,
+ 0x8077, 0x8079, 0x0028, 0x807b, 0x0034, 0x0033, 0x0033, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0035, 0x0035, 0x0006, 0x0006,
+ 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0036, 0x0037, 0x0037, 0x0036, 0x0036, 0x0001,
+ 0x0001, 0x807d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8080,
+ // Entry 300 - 33F
+ 0x0036, 0x0036, 0x0036, 0x0000, 0x0000, 0x0006, 0x0014,
+} // Size: 1550 bytes
+
+// langToAlt is a list of numbering system and symbol set pairs, sorted and
+// marked by compact language index.
+var langToAlt = []altSymData{ // 131 elements
+ 1: {compactTag: 0x0, symIndex: 0x38, system: 0x3},
+ 2: {compactTag: 0x0, symIndex: 0x42, system: 0x4},
+ 3: {compactTag: 0xa, symIndex: 0x39, system: 0x3},
+ 4: {compactTag: 0xa, symIndex: 0x2, system: 0x0},
+ 5: {compactTag: 0x28, symIndex: 0x0, system: 0x6},
+ 6: {compactTag: 0x2c, symIndex: 0x5, system: 0x0},
+ 7: {compactTag: 0x2c, symIndex: 0x3a, system: 0x3},
+ 8: {compactTag: 0x2c, symIndex: 0x42, system: 0x4},
+ 9: {compactTag: 0x40, symIndex: 0x0, system: 0x6},
+ 10: {compactTag: 0x43, symIndex: 0x0, system: 0x0},
+ 11: {compactTag: 0x43, symIndex: 0x4f, system: 0x37},
+ 12: {compactTag: 0x46, symIndex: 0x1, system: 0x0},
+ 13: {compactTag: 0x46, symIndex: 0x38, system: 0x3},
+ 14: {compactTag: 0x54, symIndex: 0x0, system: 0x9},
+ 15: {compactTag: 0x5d, symIndex: 0x3a, system: 0x3},
+ 16: {compactTag: 0x5d, symIndex: 0x8, system: 0x0},
+ 17: {compactTag: 0x60, symIndex: 0x1, system: 0x0},
+ 18: {compactTag: 0x60, symIndex: 0x38, system: 0x3},
+ 19: {compactTag: 0x60, symIndex: 0x42, system: 0x4},
+ 20: {compactTag: 0x60, symIndex: 0x0, system: 0x5},
+ 21: {compactTag: 0x60, symIndex: 0x0, system: 0x6},
+ 22: {compactTag: 0x60, symIndex: 0x0, system: 0x8},
+ 23: {compactTag: 0x60, symIndex: 0x0, system: 0x9},
+ 24: {compactTag: 0x60, symIndex: 0x0, system: 0xa},
+ 25: {compactTag: 0x60, symIndex: 0x0, system: 0xb},
+ 26: {compactTag: 0x60, symIndex: 0x0, system: 0xc},
+ 27: {compactTag: 0x60, symIndex: 0x0, system: 0xd},
+ 28: {compactTag: 0x60, symIndex: 0x0, system: 0xe},
+ 29: {compactTag: 0x60, symIndex: 0x0, system: 0xf},
+ 30: {compactTag: 0x60, symIndex: 0x0, system: 0x11},
+ 31: {compactTag: 0x60, symIndex: 0x0, system: 0x12},
+ 32: {compactTag: 0x60, symIndex: 0x0, system: 0x13},
+ 33: {compactTag: 0x60, symIndex: 0x0, system: 0x14},
+ 34: {compactTag: 0x60, symIndex: 0x0, system: 0x15},
+ 35: {compactTag: 0x60, symIndex: 0x0, system: 0x16},
+ 36: {compactTag: 0x60, symIndex: 0x0, system: 0x17},
+ 37: {compactTag: 0x60, symIndex: 0x0, system: 0x18},
+ 38: {compactTag: 0x60, symIndex: 0x0, system: 0x19},
+ 39: {compactTag: 0x60, symIndex: 0x0, system: 0x1f},
+ 40: {compactTag: 0x60, symIndex: 0x0, system: 0x21},
+ 41: {compactTag: 0x60, symIndex: 0x0, system: 0x23},
+ 42: {compactTag: 0x60, symIndex: 0x0, system: 0x24},
+ 43: {compactTag: 0x60, symIndex: 0x0, system: 0x25},
+ 44: {compactTag: 0x60, symIndex: 0x0, system: 0x28},
+ 45: {compactTag: 0x60, symIndex: 0x0, system: 0x29},
+ 46: {compactTag: 0x60, symIndex: 0x0, system: 0x2a},
+ 47: {compactTag: 0x60, symIndex: 0x0, system: 0x2b},
+ 48: {compactTag: 0x60, symIndex: 0x0, system: 0x2c},
+ 49: {compactTag: 0x60, symIndex: 0x0, system: 0x2d},
+ 50: {compactTag: 0x60, symIndex: 0x0, system: 0x30},
+ 51: {compactTag: 0x60, symIndex: 0x0, system: 0x31},
+ 52: {compactTag: 0x60, symIndex: 0x0, system: 0x32},
+ 53: {compactTag: 0x60, symIndex: 0x0, system: 0x33},
+ 54: {compactTag: 0x60, symIndex: 0x0, system: 0x34},
+ 55: {compactTag: 0x60, symIndex: 0x0, system: 0x35},
+ 56: {compactTag: 0x60, symIndex: 0x0, system: 0x36},
+ 57: {compactTag: 0x60, symIndex: 0x0, system: 0x37},
+ 58: {compactTag: 0x60, symIndex: 0x0, system: 0x39},
+ 59: {compactTag: 0x60, symIndex: 0x0, system: 0x43},
+ 60: {compactTag: 0x64, symIndex: 0x0, system: 0x0},
+ 61: {compactTag: 0x64, symIndex: 0x38, system: 0x3},
+ 62: {compactTag: 0x64, symIndex: 0x42, system: 0x4},
+ 63: {compactTag: 0x7c, symIndex: 0x50, system: 0x37},
+ 64: {compactTag: 0x7c, symIndex: 0x0, system: 0x0},
+ 65: {compactTag: 0x114, symIndex: 0x43, system: 0x4},
+ 66: {compactTag: 0x114, symIndex: 0x18, system: 0x0},
+ 67: {compactTag: 0x114, symIndex: 0x3b, system: 0x3},
+ 68: {compactTag: 0x123, symIndex: 0x1, system: 0x0},
+ 69: {compactTag: 0x123, symIndex: 0x3c, system: 0x3},
+ 70: {compactTag: 0x123, symIndex: 0x44, system: 0x4},
+ 71: {compactTag: 0x158, symIndex: 0x0, system: 0x0},
+ 72: {compactTag: 0x158, symIndex: 0x3b, system: 0x3},
+ 73: {compactTag: 0x158, symIndex: 0x45, system: 0x4},
+ 74: {compactTag: 0x160, symIndex: 0x0, system: 0x0},
+ 75: {compactTag: 0x160, symIndex: 0x38, system: 0x3},
+ 76: {compactTag: 0x16d, symIndex: 0x1b, system: 0x0},
+ 77: {compactTag: 0x16d, symIndex: 0x0, system: 0x9},
+ 78: {compactTag: 0x16d, symIndex: 0x0, system: 0xa},
+ 79: {compactTag: 0x17c, symIndex: 0x0, system: 0x0},
+ 80: {compactTag: 0x17c, symIndex: 0x3d, system: 0x3},
+ 81: {compactTag: 0x17c, symIndex: 0x42, system: 0x4},
+ 82: {compactTag: 0x182, symIndex: 0x6, system: 0x0},
+ 83: {compactTag: 0x182, symIndex: 0x38, system: 0x3},
+ 84: {compactTag: 0x1b1, symIndex: 0x0, system: 0x0},
+ 85: {compactTag: 0x1b1, symIndex: 0x3e, system: 0x3},
+ 86: {compactTag: 0x1b6, symIndex: 0x42, system: 0x4},
+ 87: {compactTag: 0x1b6, symIndex: 0x1b, system: 0x0},
+ 88: {compactTag: 0x1d2, symIndex: 0x42, system: 0x4},
+ 89: {compactTag: 0x1d2, symIndex: 0x0, system: 0x0},
+ 90: {compactTag: 0x1f3, symIndex: 0x0, system: 0xb},
+ 91: {compactTag: 0x1fd, symIndex: 0x4e, system: 0x24},
+ 92: {compactTag: 0x1fd, symIndex: 0x26, system: 0x0},
+ 93: {compactTag: 0x1ff, symIndex: 0x42, system: 0x4},
+ 94: {compactTag: 0x204, symIndex: 0x15, system: 0x0},
+ 95: {compactTag: 0x204, symIndex: 0x3f, system: 0x3},
+ 96: {compactTag: 0x204, symIndex: 0x46, system: 0x4},
+ 97: {compactTag: 0x20c, symIndex: 0x0, system: 0xb},
+ 98: {compactTag: 0x20f, symIndex: 0x6, system: 0x0},
+ 99: {compactTag: 0x20f, symIndex: 0x38, system: 0x3},
+ 100: {compactTag: 0x20f, symIndex: 0x42, system: 0x4},
+ 101: {compactTag: 0x22e, symIndex: 0x0, system: 0x0},
+ 102: {compactTag: 0x22e, symIndex: 0x47, system: 0x4},
+ 103: {compactTag: 0x22f, symIndex: 0x42, system: 0x4},
+ 104: {compactTag: 0x22f, symIndex: 0x1b, system: 0x0},
+ 105: {compactTag: 0x238, symIndex: 0x42, system: 0x4},
+ 106: {compactTag: 0x238, symIndex: 0x28, system: 0x0},
+ 107: {compactTag: 0x265, symIndex: 0x38, system: 0x3},
+ 108: {compactTag: 0x265, symIndex: 0x0, system: 0x0},
+ 109: {compactTag: 0x29d, symIndex: 0x22, system: 0x0},
+ 110: {compactTag: 0x29d, symIndex: 0x40, system: 0x3},
+ 111: {compactTag: 0x29d, symIndex: 0x48, system: 0x4},
+ 112: {compactTag: 0x29d, symIndex: 0x4d, system: 0xc},
+ 113: {compactTag: 0x2bd, symIndex: 0x31, system: 0x0},
+ 114: {compactTag: 0x2bd, symIndex: 0x3e, system: 0x3},
+ 115: {compactTag: 0x2bd, symIndex: 0x42, system: 0x4},
+ 116: {compactTag: 0x2cd, symIndex: 0x1b, system: 0x0},
+ 117: {compactTag: 0x2cd, symIndex: 0x49, system: 0x4},
+ 118: {compactTag: 0x2ce, symIndex: 0x49, system: 0x4},
+ 119: {compactTag: 0x2d0, symIndex: 0x33, system: 0x0},
+ 120: {compactTag: 0x2d0, symIndex: 0x4a, system: 0x4},
+ 121: {compactTag: 0x2d1, symIndex: 0x42, system: 0x4},
+ 122: {compactTag: 0x2d1, symIndex: 0x28, system: 0x0},
+ 123: {compactTag: 0x2d3, symIndex: 0x34, system: 0x0},
+ 124: {compactTag: 0x2d3, symIndex: 0x4b, system: 0x4},
+ 125: {compactTag: 0x2f9, symIndex: 0x0, system: 0x0},
+ 126: {compactTag: 0x2f9, symIndex: 0x38, system: 0x3},
+ 127: {compactTag: 0x2f9, symIndex: 0x42, system: 0x4},
+ 128: {compactTag: 0x2ff, symIndex: 0x36, system: 0x0},
+ 129: {compactTag: 0x2ff, symIndex: 0x41, system: 0x3},
+ 130: {compactTag: 0x2ff, symIndex: 0x4c, system: 0x4},
+} // Size: 810 bytes
+
+var tagToDecimal = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 40 - 7F
+ 0x05, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01,
+ // Entry 80 - BF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry C0 - FF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 100 - 13F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 140 - 17F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05,
+ 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 180 - 1BF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x05,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 1C0 - 1FF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, 0x05,
+ 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 200 - 23F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x05, 0x01,
+ 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 240 - 27F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 280 - 2BF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05,
+ 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 2C0 - 2FF
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ // Entry 300 - 33F
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08,
+} // Size: 799 bytes
+
+var tagToScientific = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 40 - 7F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 80 - BF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry C0 - FF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 100 - 13F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 140 - 17F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c,
+ 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 180 - 1BF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 1C0 - 1FF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 200 - 23F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, 0x02,
+ 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 240 - 27F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 280 - 2BF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 2C0 - 2FF
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ // Entry 300 - 33F
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x09,
+} // Size: 799 bytes
+
+var tagToPercent = []uint8{ // 775 elements
+ // Entry 0 - 3F
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 40 - 7F
+ 0x06, 0x06, 0x06, 0x04, 0x04, 0x04, 0x03, 0x03,
+ 0x06, 0x06, 0x03, 0x04, 0x04, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x03,
+ 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03,
+ 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, 0x03,
+ 0x03, 0x04, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03,
+ 0x03, 0x04, 0x04, 0x04, 0x07, 0x07, 0x04, 0x04,
+ // Entry 80 - BF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x03, 0x04,
+ 0x04, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry C0 - FF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ // Entry 100 - 13F
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04,
+ 0x0b, 0x0b, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ // Entry 140 - 17F
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06,
+ 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 180 - 1BF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06,
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04,
+ // Entry 1C0 - 1FF
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 200 - 23F
+ 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x06, 0x04,
+ 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 240 - 27F
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04,
+ // Entry 280 - 2BF
+ 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03,
+ 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06,
+ 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x0e,
+ // Entry 2C0 - 2FF
+ 0x0e, 0x0e, 0x04, 0x03, 0x03, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03,
+ 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+ // Entry 300 - 33F
+ 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x0a,
+} // Size: 799 bytes
+
+var formats = []Pattern{Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x0,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x0,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 3,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x9,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x0,
+ MaxIntegerDigits: 0x1,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x1},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x3,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x03\u00a0%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x7,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x01%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x6,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 3,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0xc,
+ GroupingSize: [2]uint8{0x3,
+ 0x2},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x01%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x9,
+ GroupingSize: [2]uint8{0x3,
+ 0x2},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x03\u00a0%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0xa,
+ GroupingSize: [2]uint8{0x3,
+ 0x2},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 6,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x8,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 6,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x6,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x3},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0xd,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x4},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x00\x01%",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x2,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x03%\u00a0\x00",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x7,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x0,
+ MaxIntegerDigits: 0x1,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x1},
+ Affix: "\x01[\x01]",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x5,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x0,
+ MinIntegerDigits: 0x0,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x1,
+ GroupingSize: [2]uint8{0x0,
+ 0x0},
+ Flags: 0x0},
+ Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0,
+ MaxFractionDigits: 0,
+ Increment: 0x0,
+ IncrementScale: 0x0,
+ Mode: 0x0,
+ DigitShift: 0x2,
+ MinIntegerDigits: 0x1,
+ MaxIntegerDigits: 0x0,
+ MinFractionDigits: 0x0,
+ MinSignificantDigits: 0x0,
+ MinExponentDigits: 0x0},
+ Affix: "\x01%\x00",
+ Offset: 0x0,
+ NegOffset: 0x0,
+ PadRune: 0,
+ FormatWidth: 0x6,
+ GroupingSize: [2]uint8{0x3,
+ 0x0},
+ Flags: 0x0}}
+
+// Total table size 8634 bytes (8KiB); checksum: 8F23386D
diff --git a/vendor/golang.org/x/text/internal/stringset/set.go b/vendor/golang.org/x/text/internal/stringset/set.go
new file mode 100644
index 0000000000..bb2fffbc75
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/stringset/set.go
@@ -0,0 +1,86 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package stringset provides a way to represent a collection of strings
+// compactly.
+package stringset
+
+import "sort"
+
+// A Set holds a collection of strings that can be looked up by an index number.
+type Set struct {
+ // These fields are exported to allow for code generation.
+
+ Data string
+ Index []uint16
+}
+
+// Elem returns the string with index i. It panics if i is out of range.
+func (s *Set) Elem(i int) string {
+ return s.Data[s.Index[i]:s.Index[i+1]]
+}
+
+// Len returns the number of strings in the set.
+func (s *Set) Len() int {
+ return len(s.Index) - 1
+}
+
+// Search returns the index of the given string or -1 if it is not in the set.
+// The Set must have been created with strings in sorted order.
+func Search(s *Set, str string) int {
+ // TODO: optimize this if it gets used a lot.
+ n := len(s.Index) - 1
+ p := sort.Search(n, func(i int) bool {
+ return s.Elem(i) >= str
+ })
+ if p == n || str != s.Elem(p) {
+ return -1
+ }
+ return p
+}
+
+// A Builder constructs Sets.
+type Builder struct {
+ set Set
+ index map[string]int
+}
+
+// NewBuilder returns a new and initialized Builder.
+func NewBuilder() *Builder {
+ return &Builder{
+ set: Set{
+ Index: []uint16{0},
+ },
+ index: map[string]int{},
+ }
+}
+
+// Set returns the set created so far.
+func (b *Builder) Set() Set {
+ return b.set
+}
+
+// Index returns the index for the given string, which must have been added
+// before.
+func (b *Builder) Index(s string) int {
+ return b.index[s]
+}
+
+// Add adds a string to the index. Strings that are added by a single Add will
+// be stored together, unless they match an existing string.
+func (b *Builder) Add(ss ...string) {
+ // First check if the string already exists.
+ for _, s := range ss {
+ if _, ok := b.index[s]; ok {
+ continue
+ }
+ b.index[s] = len(b.set.Index) - 1
+ b.set.Data += s
+ x := len(b.set.Data)
+ if x > 0xFFFF {
+			panic("stringset: index exceeds 0xFFFF")
+ }
+ b.set.Index = append(b.set.Index, uint16(x))
+ }
+}
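
A minimal usage sketch for the Set/Builder API added above. Note that stringset is internal to x/text, so it is only importable from inside that module, and the example strings here are invented for illustration:

package main

import (
	"fmt"
	"sort"

	"golang.org/x/text/internal/stringset" // internal package: only importable within x/text
)

func main() {
	words := []string{"foo", "bar", "baz"}
	sort.Strings(words) // Search requires the set to be built from strings in sorted order.

	b := stringset.NewBuilder()
	b.Add(words...)
	set := b.Set()

	fmt.Println(set.Len())                     // 3
	fmt.Println(set.Elem(0))                   // "bar"
	fmt.Println(stringset.Search(&set, "foo")) // 2
	fmt.Println(stringset.Search(&set, "qux")) // -1: not in the set
}

Since Builder panics once Data grows past 0xFFFF bytes, the type is meant for small, static string collections.
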
diff --git a/vendor/golang.org/x/text/message/catalog.go b/vendor/golang.org/x/text/message/catalog.go
new file mode 100644
index 0000000000..068271def4
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+// TODO: some types in this file will need to be made public at some time.
+// Documentation and method names will reflect this by using the exported name.
+
+import (
+ "golang.org/x/text/language"
+ "golang.org/x/text/message/catalog"
+)
+
+// MatchLanguage reports the matched tag obtained from language.MatchStrings for
+// the Matcher of the DefaultCatalog.
+func MatchLanguage(preferred ...string) language.Tag {
+ c := DefaultCatalog
+ tag, _ := language.MatchStrings(c.Matcher(), preferred...)
+ return tag
+}
+
+// DefaultCatalog is used by SetString.
+var DefaultCatalog catalog.Catalog = defaultCatalog
+
+var defaultCatalog = catalog.NewBuilder()
+
+// SetString calls SetString on the initial default Catalog.
+func SetString(tag language.Tag, key string, msg string) error {
+ return defaultCatalog.SetString(tag, key, msg)
+}
+
+// Set calls Set on the initial default Catalog.
+func Set(tag language.Tag, key string, msg ...catalog.Message) error {
+ return defaultCatalog.Set(tag, key, msg...)
+}
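
A short sketch of how the default-catalog helpers above fit together with message.NewPrinter (the printer API is documented in message/doc.go later in this patch); the Dutch key/translation pair is invented for illustration:

package main

import (
	"golang.org/x/text/language"
	"golang.org/x/text/message"
)

func main() {
	// Register a translation in the default catalog used by SetString.
	if err := message.SetString(language.Dutch, "Hello, world!", "Hallo, wereld!"); err != nil {
		panic(err)
	}

	// Resolve the caller's preferred languages against the default catalog's Matcher.
	tag := message.MatchLanguage("nl-BE", "en")

	p := message.NewPrinter(tag)
	p.Printf("Hello, world!\n") // prints "Hallo, wereld!" when a Dutch tag is matched
}
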
diff --git a/vendor/golang.org/x/text/message/catalog/catalog.go b/vendor/golang.org/x/text/message/catalog/catalog.go
new file mode 100644
index 0000000000..96955d0752
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog/catalog.go
@@ -0,0 +1,365 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package catalog defines collections of translated format strings.
+//
+// This package mostly defines types for populating catalogs with messages. The
+// catmsg package contains further definitions for creating custom message and
+// dictionary types, as well as for packages that use Catalogs.
+//
+// Package catalog defines various interfaces: Dictionary, Loader, and Message.
+// A Dictionary maintains a set of translations of format strings for a single
+// language. The Loader interface defines a source of dictionaries. A
+// translation of a format string is represented by a Message.
+//
+// # Catalogs
+//
+// A Catalog defines a programmatic interface for setting message translations.
+// It maintains a set of per-language dictionaries with translations for a set
+// of keys. For message translation to function properly, a translation should
+// be defined for each key for each supported language. A dictionary may be
+// underspecified, though, if there is a parent language that already defines
+// the key. For example, a Dictionary for "en-GB" could leave out entries that
+// are identical to those in a dictionary for "en".
+//
+// # Messages
+//
+// A Message is a format string that varies based on the values of substitution
+// variables. For instance, to indicate the number of results one could want "no
+// results" if there are none, "1 result" if there is 1, and "%d results" for
+// any other number. Catalog is agnostic to the kind of format strings that are
+// used: for instance, messages can follow either the printf-style substitution
+// from package fmt or use templates.
+//
+// A Message does not substitute arguments in the format string. This job is
+// reserved for packages that render strings, such as message, that use Catalogs
+// to selected string. This separation of concerns allows Catalog to be used to
+// store any kind of formatting strings.
+//
+// # Selecting messages based on linguistic features of substitution arguments
+//
+// Messages may vary based on any linguistic features of the argument values.
+// The most common one is plural form, but others exist.
+//
+// Selection messages are provided in packages that provide support for a
+// specific linguistic feature. The following snippet uses plural.Selectf:
+//
+// catalog.Set(language.English, "You are %d minute(s) late.",
+// plural.Selectf(1, "",
+// plural.One, "You are 1 minute late.",
+// plural.Other, "You are %d minutes late."))
+//
+// In this example, a message is stored in the Catalog where one of two messages
+// is selected based on the first argument, a number. The first message is
+// selected if the argument is singular (identified by the selector "one") and
+// the second message is selected in all other cases. The selectors are defined
+// by the plural rules defined in CLDR. The selector "other" is special and will
+// always match. Each language always defines one of the linguistic categories
+// to be "other." For English, singular is "one" and plural is "other".
+//
+// Selects can be nested. This allows selecting sentences based on features of
+// multiple arguments or multiple linguistic properties of a single argument.
+//
+// # String interpolation
+//
+// There is often a lot of commonality between the possible variants of a
+// message. For instance, in the example above the word "minute" varies based on
+// the plural category of the argument, but the rest of the sentence is
+// identical. Using interpolation the above message can be rewritten as:
+//
+// catalog.Set(language.English, "You are %d minute(s) late.",
+// catalog.Var("minutes",
+// plural.Selectf(1, "", plural.One, "minute", plural.Other, "minutes")),
+// catalog.String("You are %[1]d ${minutes} late."))
+//
+// Var is defined to return the variable name if the message does not yield a
+// match. This allows us to further simplify this snippet to
+//
+// catalog.Set(language.English, "You are %d minute(s) late.",
+// catalog.Var("minutes", plural.Selectf(1, "", plural.One, "minute")),
+// catalog.String("You are %d ${minutes} late."))
+//
+// Overall this is still only a minor improvement, but things can get a lot more
+// unwieldy if more than one linguistic feature is used to determine a message
+// variant. Consider the following example:
+//
+// // argument 1: list of hosts, argument 2: list of guests
+// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.",
+// catalog.Var("their",
+//	    plural.Selectf(1, "",
+//	        plural.One, gender.Select(1, "female", "her", "other", "his"))),
+//	    catalog.Var("invites", plural.Selectf(1, "", plural.One, "invite")),
+//	    catalog.String("%[1]v ${invites} %[2]v to ${their} party."))
+//
+// Without variable substitution, this would have to be written as
+//
+// // argument 1: list of hosts, argument 2: list of guests
+// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.",
+// plural.Selectf(1, "",
+// plural.One, gender.Select(1,
+//	        "female", "%[1]v invites %[2]v to her party.",
+// "other", "%[1]v invites %[2]v to his party."),
+// plural.Other, "%[1]v invites %[2]v to their party."))
+//
+// Not necessarily shorter, but using variables there is less duplication and
+// the messages are more maintenance friendly. Moreover, languages may have up
+// to six plural forms. This makes the use of variables more welcome.
+//
+// Different messages using the same inflections can reuse variables by moving
+// them to macros. Using macros we can rewrite the message as:
+//
+// // argument 1: list of hosts, argument 2: list of guests
+// catalog.SetString(language.English, "%[1]v invite(s) %[2]v to their party.",
+// "%[1]v ${invites(1)} %[2]v to ${their(1)} party.")
+//
+// Where the following macros were defined separately.
+//
+// catalog.SetMacro(language.English, "invites", plural.Selectf(1, "",
+// plural.One, "invite"))
+// catalog.SetMacro(language.English, "their", plural.Selectf(1, "",
+//	    plural.One, gender.Select(1, "female", "her", "other", "his")))
+//
+// Placeholders use parentheses and the arguments to invoke a macro.
+//
+// # Looking up messages
+//
+// Message lookup using Catalogs is typically only done by specialized packages
+// and is not something the user should be concerned with. For instance, to
+// express the tardiness of a user using the related message we defined earlier,
+// the user may use the package message like so:
+//
+// p := message.NewPrinter(language.English)
+// p.Printf("You are %d minute(s) late.", 5)
+//
+// Which would print:
+//
+// You are 5 minutes late.
+//
+// This package is UNDER CONSTRUCTION and its API may change.
+package catalog // import "golang.org/x/text/message/catalog"
+
+// TODO:
+// Some way to freeze a catalog.
+// - Locking on each lookup turns out to be about 50% of the total running time
+// for some of the benchmarks in the message package.
+// Consider these:
+// - Sequence type to support sequences in user-defined messages.
+// - Garbage collection: Remove dictionaries that can no longer be reached
+// as other dictionaries have been added that cover all possible keys.
+
+import (
+ "errors"
+ "fmt"
+
+ "golang.org/x/text/internal"
+
+ "golang.org/x/text/internal/catmsg"
+ "golang.org/x/text/language"
+)
+
+// A Catalog allows lookup of translated messages.
+type Catalog interface {
+ // Languages returns all languages for which the Catalog contains variants.
+ Languages() []language.Tag
+
+ // Matcher returns a Matcher for languages from this Catalog.
+ Matcher() language.Matcher
+
+ // A Context is used for evaluating Messages.
+ Context(tag language.Tag, r catmsg.Renderer) *Context
+
+ // This method also makes Catalog a private interface.
+ lookup(tag language.Tag, key string) (data string, ok bool)
+}
+
+// NewFromMap creates a Catalog from the given map. If a Dictionary is
+// underspecified the entry is retrieved from a parent language.
+func NewFromMap(dictionaries map[string]Dictionary, opts ...Option) (Catalog, error) {
+ options := options{}
+ for _, o := range opts {
+ o(&options)
+ }
+ c := &catalog{
+ dicts: map[language.Tag]Dictionary{},
+ }
+ _, hasFallback := dictionaries[options.fallback.String()]
+ if hasFallback {
+ // TODO: Should it be okay to not have a fallback language?
+ // Catalog generators could enforce there is always a fallback.
+ c.langs = append(c.langs, options.fallback)
+ }
+ for lang, dict := range dictionaries {
+ tag, err := language.Parse(lang)
+ if err != nil {
+ return nil, fmt.Errorf("catalog: invalid language tag %q", lang)
+ }
+ if _, ok := c.dicts[tag]; ok {
+ return nil, fmt.Errorf("catalog: duplicate entry for tag %q after normalization", tag)
+ }
+ c.dicts[tag] = dict
+ if !hasFallback || tag != options.fallback {
+ c.langs = append(c.langs, tag)
+ }
+ }
+ if hasFallback {
+ internal.SortTags(c.langs[1:])
+ } else {
+ internal.SortTags(c.langs)
+ }
+ c.matcher = language.NewMatcher(c.langs)
+ return c, nil
+}
+
+// A Dictionary is a source of translations for a single language.
+type Dictionary interface {
+ // Lookup returns a message compiled with catmsg.Compile for the given key.
+ // It returns false for ok if such a message could not be found.
+ Lookup(key string) (data string, ok bool)
+}
+
+type catalog struct {
+ langs []language.Tag
+ dicts map[language.Tag]Dictionary
+ macros store
+ matcher language.Matcher
+}
+
+func (c *catalog) Languages() []language.Tag { return c.langs }
+func (c *catalog) Matcher() language.Matcher { return c.matcher }
+
+func (c *catalog) lookup(tag language.Tag, key string) (data string, ok bool) {
+ for ; ; tag = tag.Parent() {
+ if dict, ok := c.dicts[tag]; ok {
+ if data, ok := dict.Lookup(key); ok {
+ return data, true
+ }
+ }
+ if tag == language.Und {
+ break
+ }
+ }
+ return "", false
+}
+
+// Context returns a Context for formatting messages.
+// Only one Message may be formatted per context at any given time.
+func (c *catalog) Context(tag language.Tag, r catmsg.Renderer) *Context {
+ return &Context{
+ cat: c,
+ tag: tag,
+ dec: catmsg.NewDecoder(tag, r, &dict{&c.macros, tag}),
+ }
+}
+
+// A Builder allows building a Catalog programmatically.
+type Builder struct {
+ options
+ matcher language.Matcher
+
+ index store
+ macros store
+}
+
+type options struct {
+ fallback language.Tag
+}
+
+// An Option configures Catalog behavior.
+type Option func(*options)
+
+// Fallback specifies the default fallback language. The default is Und.
+func Fallback(tag language.Tag) Option {
+ return func(o *options) { o.fallback = tag }
+}
+
+// TODO:
+// // Catalogs specifies one or more sources for a Catalog.
+// // Lookups are in order.
+// // This can be changed by inserting a Catalog used for setting, which
+// // implements Loader, into the lookup chain.
+// func Catalogs(d ...Loader) Option {
+// return nil
+// }
+//
+// func Delims(start, end string) Option {}
+//
+// func Dict(tag language.Tag, d ...Dictionary) Option
+
+// NewBuilder returns an empty mutable Catalog.
+func NewBuilder(opts ...Option) *Builder {
+ c := &Builder{}
+ for _, o := range opts {
+ o(&c.options)
+ }
+ return c
+}
+
+// SetString is shorthand for Set(tag, key, String(msg)).
+func (c *Builder) SetString(tag language.Tag, key string, msg string) error {
+ return c.set(tag, key, &c.index, String(msg))
+}
+
+// Set sets the translation for the given language and key.
+//
+// When evaluating this message, the first Message in the msgs sequence that
+// evaluates to a string will be the message returned.
+func (c *Builder) Set(tag language.Tag, key string, msg ...Message) error {
+ return c.set(tag, key, &c.index, msg...)
+}
+
+// SetMacro defines a Message that may be substituted in another message.
+// The arguments to a macro Message are passed as arguments in the
+// placeholder of the form "${foo(arg1, arg2)}".
+func (c *Builder) SetMacro(tag language.Tag, name string, msg ...Message) error {
+ return c.set(tag, name, &c.macros, msg...)
+}
+
+// ErrNotFound indicates there was no message for the given key.
+var ErrNotFound = errors.New("catalog: message not found")
+
+// String specifies a plain message string. It can be used as fallback if no
+// other strings match or as a simple standalone message.
+//
+// It is an error to pass more than one String in a message sequence.
+func String(name string) Message {
+ return catmsg.String(name)
+}
+
+// Var sets a variable that may be substituted in formatting patterns using
+// named substitution of the form "${name}". The name argument is used as a
+// fallback if the statements do not produce a match. The statement sequence may
+// not contain any Var calls.
+//
+// The name passed to a Var must be unique within a message sequence.
+func Var(name string, msg ...Message) Message {
+ return &catmsg.Var{Name: name, Message: firstInSequence(msg)}
+}
+
+// Context returns a Context for formatting messages.
+// Only one Message may be formatted per context at any given time.
+func (b *Builder) Context(tag language.Tag, r catmsg.Renderer) *Context {
+ return &Context{
+ cat: b,
+ tag: tag,
+ dec: catmsg.NewDecoder(tag, r, &dict{&b.macros, tag}),
+ }
+}
+
+// A Context is used for evaluating Messages.
+// Only one Message may be formatted per context at any given time.
+type Context struct {
+ cat Catalog
+ tag language.Tag // TODO: use compact index.
+ dec *catmsg.Decoder
+}
+
+// Execute looks up and executes the message with the given key.
+// It returns ErrNotFound if no message could be found in the index.
+func (c *Context) Execute(key string) error {
+ data, ok := c.cat.lookup(c.tag, key)
+ if !ok {
+ return ErrNotFound
+ }
+ return c.dec.Execute(data)
+}
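
To make the Builder API concrete, here is a minimal sketch that mirrors the interpolation example in the doc comment above; it assumes plural.Selectf comes from golang.org/x/text/feature/plural, and the messages are illustrative only:

package main

import (
	"fmt"

	"golang.org/x/text/feature/plural"
	"golang.org/x/text/language"
	"golang.org/x/text/message/catalog"
)

func main() {
	b := catalog.NewBuilder(catalog.Fallback(language.English))

	// A plain message string.
	if err := b.SetString(language.English, "Hello!", "Hello!"); err != nil {
		panic(err)
	}

	// A message with a plural-dependent variable, as in the package documentation.
	err := b.Set(language.English, "You are %d minute(s) late.",
		catalog.Var("minutes",
			plural.Selectf(1, "", plural.One, "minute", plural.Other, "minutes")),
		catalog.String("You are %[1]d ${minutes} late."))
	if err != nil {
		panic(err)
	}

	fmt.Println(b.Languages()) // [en]
}
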
diff --git a/vendor/golang.org/x/text/message/catalog/dict.go b/vendor/golang.org/x/text/message/catalog/dict.go
new file mode 100644
index 0000000000..a0eb81810b
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog/dict.go
@@ -0,0 +1,129 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package catalog
+
+import (
+ "sync"
+
+ "golang.org/x/text/internal"
+ "golang.org/x/text/internal/catmsg"
+ "golang.org/x/text/language"
+)
+
+// TODO:
+// Dictionary returns a Dictionary that returns the first Message, using the
+// given language tag, that matches:
+// 1. the last one registered by one of the Set methods
+// 2. returned by one of the Loaders
+// 3. repeat from 1. using the parent language
+// This approach allows messages to be underspecified.
+// func (c *Catalog) Dictionary(tag language.Tag) (Dictionary, error) {
+// // TODO: verify dictionary exists.
+// return &dict{&c.index, tag}, nil
+// }
+
+type dict struct {
+ s *store
+ tag language.Tag // TODO: make compact tag.
+}
+
+func (d *dict) Lookup(key string) (data string, ok bool) {
+ return d.s.lookup(d.tag, key)
+}
+
+func (b *Builder) lookup(tag language.Tag, key string) (data string, ok bool) {
+ return b.index.lookup(tag, key)
+}
+
+func (c *Builder) set(tag language.Tag, key string, s *store, msg ...Message) error {
+ data, err := catmsg.Compile(tag, &dict{&c.macros, tag}, firstInSequence(msg))
+
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ m := s.index[tag]
+ if m == nil {
+ m = msgMap{}
+ if s.index == nil {
+ s.index = map[language.Tag]msgMap{}
+ }
+ c.matcher = nil
+ s.index[tag] = m
+ }
+
+ m[key] = data
+ return err
+}
+
+func (c *Builder) Matcher() language.Matcher {
+ c.index.mutex.RLock()
+ m := c.matcher
+ c.index.mutex.RUnlock()
+ if m != nil {
+ return m
+ }
+
+ c.index.mutex.Lock()
+ if c.matcher == nil {
+ c.matcher = language.NewMatcher(c.unlockedLanguages())
+ }
+ m = c.matcher
+ c.index.mutex.Unlock()
+ return m
+}
+
+type store struct {
+ mutex sync.RWMutex
+ index map[language.Tag]msgMap
+}
+
+type msgMap map[string]string
+
+func (s *store) lookup(tag language.Tag, key string) (data string, ok bool) {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+
+ for ; ; tag = tag.Parent() {
+ if msgs, ok := s.index[tag]; ok {
+ if msg, ok := msgs[key]; ok {
+ return msg, true
+ }
+ }
+ if tag == language.Und {
+ break
+ }
+ }
+ return "", false
+}
+
+// Languages returns all languages for which the Catalog contains variants.
+func (b *Builder) Languages() []language.Tag {
+ s := &b.index
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+
+ return b.unlockedLanguages()
+}
+
+func (b *Builder) unlockedLanguages() []language.Tag {
+ s := &b.index
+ if len(s.index) == 0 {
+ return nil
+ }
+ tags := make([]language.Tag, 0, len(s.index))
+ _, hasFallback := s.index[b.options.fallback]
+ offset := 0
+ if hasFallback {
+ tags = append(tags, b.options.fallback)
+ offset = 1
+ }
+ for t := range s.index {
+ if t != b.options.fallback {
+ tags = append(tags, t)
+ }
+ }
+ internal.SortTags(tags[offset:])
+ return tags
+}
diff --git a/vendor/golang.org/x/text/message/catalog/go19.go b/vendor/golang.org/x/text/message/catalog/go19.go
new file mode 100644
index 0000000000..291a4df949
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog/go19.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.9
+
+package catalog
+
+import "golang.org/x/text/internal/catmsg"
+
+// A Message holds a collection of translations for the same phrase that may
+// vary based on the values of substitution arguments.
+type Message = catmsg.Message
+
+type firstInSequence = catmsg.FirstOf
diff --git a/vendor/golang.org/x/text/message/catalog/gopre19.go b/vendor/golang.org/x/text/message/catalog/gopre19.go
new file mode 100644
index 0000000000..da44ebb8be
--- /dev/null
+++ b/vendor/golang.org/x/text/message/catalog/gopre19.go
@@ -0,0 +1,23 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.9
+
+package catalog
+
+import "golang.org/x/text/internal/catmsg"
+
+// A Message holds a collection of translations for the same phrase that may
+// vary based on the values of substitution arguments.
+type Message interface {
+ catmsg.Message
+}
+
+func firstInSequence(m []Message) catmsg.Message {
+ a := []catmsg.Message{}
+ for _, m := range m {
+ a = append(a, m)
+ }
+ return catmsg.FirstOf(a)
+}
diff --git a/vendor/golang.org/x/text/message/doc.go b/vendor/golang.org/x/text/message/doc.go
new file mode 100644
index 0000000000..4bf7bdcac3
--- /dev/null
+++ b/vendor/golang.org/x/text/message/doc.go
@@ -0,0 +1,99 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package message implements formatted I/O for localized strings with functions
+// analogous to fmt's print functions. It is a drop-in replacement for fmt.
+//
+// # Localized Formatting
+//
+// A format string can be localized by replacing any of the print functions of
+// fmt with an equivalent call to a Printer.
+//
+// p := message.NewPrinter(message.MatchLanguage("en"))
+// p.Println(123456.78) // Prints 123,456.78
+//
+// p.Printf("%d ducks in a row", 4331) // Prints 4,331 ducks in a row
+//
+// p := message.NewPrinter(message.MatchLanguage("nl"))
+// p.Printf("Hoogte: %.1f meter", 1244.9) // Prints Hoogte: 1,244.9 meter
+//
+// p := message.NewPrinter(message.MatchLanguage("bn"))
+// p.Println(123456.78) // Prints ১,২৩,৪৫৬.৭৮
+//
+// Printer currently supports numbers and specialized types for which packages
+// exist in x/text. Other builtin types such as time.Time and slices are
+// planned.
+//
+// Format strings largely have the same meaning as with fmt with the following
+// notable exceptions:
+// - flag # always resorts to fmt for printing
+// - verb 'f', 'e', 'g', 'd' use localized formatting unless the '#' flag is
+// specified.
+// - verb 'm' inserts a translation of a string argument.
+//
+// See package fmt for more options.
+//
+// # Translation
+//
+// The format strings that are passed to Printf, Sprintf, Fprintf, or Errorf
+// are used as keys to look up translations for the specified languages.
+// More on how these need to be specified below.
+//
+// One can use arbitrary keys to distinguish between otherwise ambiguous
+// strings:
+//
+// p := message.NewPrinter(language.English)
+// p.Printf("archive(noun)") // Prints "archive"
+// p.Printf("archive(verb)") // Prints "archive"
+//
+// p := message.NewPrinter(language.German)
+// p.Printf("archive(noun)") // Prints "Archiv"
+// p.Printf("archive(verb)") // Prints "archivieren"
+//
+// To retain the fallback functionality, use Key:
+//
+// p.Printf(message.Key("archive(noun)", "archive"))
+// p.Printf(message.Key("archive(verb)", "archive"))
+//
+// # Translation Pipeline
+//
+// Format strings that contain text need to be translated to support different
+// locales. The first step is to extract strings that need to be translated.
+//
+// 1. Install gotext
+//
+// go get -u golang.org/x/text/cmd/gotext
+// gotext -help
+//
+// 2. Mark strings in your source to be translated by using message.Printer,
+// instead of the functions of the fmt package.
+//
+// 3. Extract the strings from your source
+//
+// gotext extract
+//
+// The output will be written to the textdata directory.
+//
+// 4. Send the files for translation
+//
+// It is planned to support multiple formats, but for now one will have to
+// rewrite the JSON output to the desired format.
+//
+// 5. Inject translations into program
+//
+// 6. Repeat from 2
+//
+// Right now this has to be done programmatically with calls to Set or
+// SetString. These functions, as well as the methods defined in the
+// golang.org/x/text/message/catalog package, can be used to implement
+// either dynamic or static loading of messages.
+//
+// # Plural and Gender Forms
+//
+// Translated messages can vary based on the plural and gender forms of
+// substitution values. In general, it is up to the translators to provide
+// alternative translations for such forms. See the packages in
+// golang.org/x/text/feature and golang.org/x/text/message/catalog for more
+// information.
+package message
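Editor's note: the pipeline above describes injecting translations programmatically with Set or SetString but shows no complete program. The following is a minimal sketch, assuming only the message and language packages vendored here; the Dutch translation string is an invented example, not part of any catalog in this tree.

    package main

    import (
        "golang.org/x/text/language"
        "golang.org/x/text/message"
    )

    func main() {
        // Step 5 of the pipeline above: register a translation for the format
        // string, which doubles as the lookup key.
        message.SetString(language.Dutch, "%d ducks in a row", "%d eenden op een rij")

        p := message.NewPrinter(language.Dutch)
        p.Printf("%d ducks in a row", 4331) // renders the Dutch message with localized digits
        p.Println()
    }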
diff --git a/vendor/golang.org/x/text/message/format.go b/vendor/golang.org/x/text/message/format.go
new file mode 100644
index 0000000000..a47d17dd4d
--- /dev/null
+++ b/vendor/golang.org/x/text/message/format.go
@@ -0,0 +1,510 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+ "bytes"
+ "strconv"
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/format"
+)
+
+const (
+ ldigits = "0123456789abcdefx"
+ udigits = "0123456789ABCDEFX"
+)
+
+const (
+ signed = true
+ unsigned = false
+)
+
+// A formatInfo is the raw formatter used by Printf etc.
+// It prints into a buffer that must be set up separately.
+type formatInfo struct {
+ buf *bytes.Buffer
+
+ format.Parser
+
+ // intbuf is large enough to store %b of an int64 with a sign and
+ // avoids padding at the end of the struct on 32 bit architectures.
+ intbuf [68]byte
+}
+
+func (f *formatInfo) init(buf *bytes.Buffer) {
+ f.ClearFlags()
+ f.buf = buf
+}
+
+// writePadding generates n bytes of padding.
+func (f *formatInfo) writePadding(n int) {
+ if n <= 0 { // No padding bytes needed.
+ return
+ }
+ f.buf.Grow(n)
+ // Decide which byte the padding should be filled with.
+ padByte := byte(' ')
+ if f.Zero {
+ padByte = byte('0')
+ }
+ // Fill padding with padByte.
+ for i := 0; i < n; i++ {
+ f.buf.WriteByte(padByte) // TODO: make more efficient.
+ }
+}
+
+// pad appends b to f.buf, padded on left (!f.minus) or right (f.minus).
+func (f *formatInfo) pad(b []byte) {
+ if !f.WidthPresent || f.Width == 0 {
+ f.buf.Write(b)
+ return
+ }
+ width := f.Width - utf8.RuneCount(b)
+ if !f.Minus {
+ // left padding
+ f.writePadding(width)
+ f.buf.Write(b)
+ } else {
+ // right padding
+ f.buf.Write(b)
+ f.writePadding(width)
+ }
+}
+
+// padString appends s to f.buf, padded on left (!f.minus) or right (f.minus).
+func (f *formatInfo) padString(s string) {
+ if !f.WidthPresent || f.Width == 0 {
+ f.buf.WriteString(s)
+ return
+ }
+ width := f.Width - utf8.RuneCountInString(s)
+ if !f.Minus {
+ // left padding
+ f.writePadding(width)
+ f.buf.WriteString(s)
+ } else {
+ // right padding
+ f.buf.WriteString(s)
+ f.writePadding(width)
+ }
+}
+
+// fmt_boolean formats a boolean.
+func (f *formatInfo) fmt_boolean(v bool) {
+ if v {
+ f.padString("true")
+ } else {
+ f.padString("false")
+ }
+}
+
+// fmt_unicode formats a uint64 as "U+0078" or with f.sharp set as "U+0078 'x'".
+func (f *formatInfo) fmt_unicode(u uint64) {
+ buf := f.intbuf[0:]
+
+ // With default precision set the maximum needed buf length is 18
+ // for formatting -1 with %#U ("U+FFFFFFFFFFFFFFFF") which fits
+ // into the already allocated intbuf with a capacity of 68 bytes.
+ prec := 4
+ if f.PrecPresent && f.Prec > 4 {
+ prec = f.Prec
+ // Compute space needed for "U+" , number, " '", character, "'".
+ width := 2 + prec + 2 + utf8.UTFMax + 1
+ if width > len(buf) {
+ buf = make([]byte, width)
+ }
+ }
+
+ // Format into buf, ending at buf[i]. Formatting numbers is easier right-to-left.
+ i := len(buf)
+
+ // For %#U we want to add a space and a quoted character at the end of the buffer.
+ if f.Sharp && u <= utf8.MaxRune && strconv.IsPrint(rune(u)) {
+ i--
+ buf[i] = '\''
+ i -= utf8.RuneLen(rune(u))
+ utf8.EncodeRune(buf[i:], rune(u))
+ i--
+ buf[i] = '\''
+ i--
+ buf[i] = ' '
+ }
+ // Format the Unicode code point u as a hexadecimal number.
+ for u >= 16 {
+ i--
+ buf[i] = udigits[u&0xF]
+ prec--
+ u >>= 4
+ }
+ i--
+ buf[i] = udigits[u]
+ prec--
+ // Add zeros in front of the number until requested precision is reached.
+ for prec > 0 {
+ i--
+ buf[i] = '0'
+ prec--
+ }
+ // Add a leading "U+".
+ i--
+ buf[i] = '+'
+ i--
+ buf[i] = 'U'
+
+ oldZero := f.Zero
+ f.Zero = false
+ f.pad(buf[i:])
+ f.Zero = oldZero
+}
+
+// fmt_integer formats signed and unsigned integers.
+func (f *formatInfo) fmt_integer(u uint64, base int, isSigned bool, digits string) {
+ negative := isSigned && int64(u) < 0
+ if negative {
+ u = -u
+ }
+
+ buf := f.intbuf[0:]
+ // The already allocated f.intbuf with a capacity of 68 bytes
+ // is large enough for integer formatting when no precision or width is set.
+ if f.WidthPresent || f.PrecPresent {
+ // Account 3 extra bytes for possible addition of a sign and "0x".
+ width := 3 + f.Width + f.Prec // wid and prec are always positive.
+ if width > len(buf) {
+ // We're going to need a bigger boat.
+ buf = make([]byte, width)
+ }
+ }
+
+ // Two ways to ask for extra leading zero digits: %.3d or %03d.
+ // If both are specified the f.zero flag is ignored and
+ // padding with spaces is used instead.
+ prec := 0
+ if f.PrecPresent {
+ prec = f.Prec
+ // Precision of 0 and value of 0 means "print nothing" but padding.
+ if prec == 0 && u == 0 {
+ oldZero := f.Zero
+ f.Zero = false
+ f.writePadding(f.Width)
+ f.Zero = oldZero
+ return
+ }
+ } else if f.Zero && f.WidthPresent {
+ prec = f.Width
+ if negative || f.Plus || f.Space {
+ prec-- // leave room for sign
+ }
+ }
+
+ // Because printing is easier right-to-left: format u into buf, ending at buf[i].
+ // We could make things marginally faster by splitting the 32-bit case out
+ // into a separate block but it's not worth the duplication, so u has 64 bits.
+ i := len(buf)
+ // Use constants for the division and modulo for more efficient code.
+ // Switch cases ordered by popularity.
+ switch base {
+ case 10:
+ for u >= 10 {
+ i--
+ next := u / 10
+ buf[i] = byte('0' + u - next*10)
+ u = next
+ }
+ case 16:
+ for u >= 16 {
+ i--
+ buf[i] = digits[u&0xF]
+ u >>= 4
+ }
+ case 8:
+ for u >= 8 {
+ i--
+ buf[i] = byte('0' + u&7)
+ u >>= 3
+ }
+ case 2:
+ for u >= 2 {
+ i--
+ buf[i] = byte('0' + u&1)
+ u >>= 1
+ }
+ default:
+ panic("fmt: unknown base; can't happen")
+ }
+ i--
+ buf[i] = digits[u]
+ for i > 0 && prec > len(buf)-i {
+ i--
+ buf[i] = '0'
+ }
+
+ // Various prefixes: 0x, -, etc.
+ if f.Sharp {
+ switch base {
+ case 8:
+ if buf[i] != '0' {
+ i--
+ buf[i] = '0'
+ }
+ case 16:
+ // Add a leading 0x or 0X.
+ i--
+ buf[i] = digits[16]
+ i--
+ buf[i] = '0'
+ }
+ }
+
+ if negative {
+ i--
+ buf[i] = '-'
+ } else if f.Plus {
+ i--
+ buf[i] = '+'
+ } else if f.Space {
+ i--
+ buf[i] = ' '
+ }
+
+ // Left padding with zeros has already been handled like precision earlier
+ // or the f.zero flag is ignored due to an explicitly set precision.
+ oldZero := f.Zero
+ f.Zero = false
+ f.pad(buf[i:])
+ f.Zero = oldZero
+}
+
+// truncate truncates the string to the specified precision, if present.
+func (f *formatInfo) truncate(s string) string {
+ if f.PrecPresent {
+ n := f.Prec
+ for i := range s {
+ n--
+ if n < 0 {
+ return s[:i]
+ }
+ }
+ }
+ return s
+}
+
+// fmt_s formats a string.
+func (f *formatInfo) fmt_s(s string) {
+ s = f.truncate(s)
+ f.padString(s)
+}
+
+// fmt_sbx formats a string or byte slice as a hexadecimal encoding of its bytes.
+func (f *formatInfo) fmt_sbx(s string, b []byte, digits string) {
+ length := len(b)
+ if b == nil {
+ // No byte slice present. Assume string s should be encoded.
+ length = len(s)
+ }
+ // Set length to not process more bytes than the precision demands.
+ if f.PrecPresent && f.Prec < length {
+ length = f.Prec
+ }
+ // Compute width of the encoding taking into account the f.sharp and f.space flag.
+ width := 2 * length
+ if width > 0 {
+ if f.Space {
+ // Each element encoded by two hexadecimals will get a leading 0x or 0X.
+ if f.Sharp {
+ width *= 2
+ }
+ // Elements will be separated by a space.
+ width += length - 1
+ } else if f.Sharp {
+ // Only a leading 0x or 0X will be added for the whole string.
+ width += 2
+ }
+ } else { // The byte slice or string that should be encoded is empty.
+ if f.WidthPresent {
+ f.writePadding(f.Width)
+ }
+ return
+ }
+ // Handle padding to the left.
+ if f.WidthPresent && f.Width > width && !f.Minus {
+ f.writePadding(f.Width - width)
+ }
+ // Write the encoding directly into the output buffer.
+ buf := f.buf
+ if f.Sharp {
+ // Add leading 0x or 0X.
+ buf.WriteByte('0')
+ buf.WriteByte(digits[16])
+ }
+ var c byte
+ for i := 0; i < length; i++ {
+ if f.Space && i > 0 {
+ // Separate elements with a space.
+ buf.WriteByte(' ')
+ if f.Sharp {
+ // Add leading 0x or 0X for each element.
+ buf.WriteByte('0')
+ buf.WriteByte(digits[16])
+ }
+ }
+ if b != nil {
+ c = b[i] // Take a byte from the input byte slice.
+ } else {
+ c = s[i] // Take a byte from the input string.
+ }
+ // Encode each byte as two hexadecimal digits.
+ buf.WriteByte(digits[c>>4])
+ buf.WriteByte(digits[c&0xF])
+ }
+ // Handle padding to the right.
+ if f.WidthPresent && f.Width > width && f.Minus {
+ f.writePadding(f.Width - width)
+ }
+}
+
+// fmt_sx formats a string as a hexadecimal encoding of its bytes.
+func (f *formatInfo) fmt_sx(s, digits string) {
+ f.fmt_sbx(s, nil, digits)
+}
+
+// fmt_bx formats a byte slice as a hexadecimal encoding of its bytes.
+func (f *formatInfo) fmt_bx(b []byte, digits string) {
+ f.fmt_sbx("", b, digits)
+}
+
+// fmt_q formats a string as a double-quoted, escaped Go string constant.
+// If f.sharp is set a raw (backquoted) string may be returned instead
+// if the string does not contain any control characters other than tab.
+func (f *formatInfo) fmt_q(s string) {
+ s = f.truncate(s)
+ if f.Sharp && strconv.CanBackquote(s) {
+ f.padString("`" + s + "`")
+ return
+ }
+ buf := f.intbuf[:0]
+ if f.Plus {
+ f.pad(strconv.AppendQuoteToASCII(buf, s))
+ } else {
+ f.pad(strconv.AppendQuote(buf, s))
+ }
+}
+
+// fmt_c formats an integer as a Unicode character.
+// If the character is not valid Unicode, it will print '\ufffd'.
+func (f *formatInfo) fmt_c(c uint64) {
+ r := rune(c)
+ if c > utf8.MaxRune {
+ r = utf8.RuneError
+ }
+ buf := f.intbuf[:0]
+ w := utf8.EncodeRune(buf[:utf8.UTFMax], r)
+ f.pad(buf[:w])
+}
+
+// fmt_qc formats an integer as a single-quoted, escaped Go character constant.
+// If the character is not valid Unicode, it will print '\ufffd'.
+func (f *formatInfo) fmt_qc(c uint64) {
+ r := rune(c)
+ if c > utf8.MaxRune {
+ r = utf8.RuneError
+ }
+ buf := f.intbuf[:0]
+ if f.Plus {
+ f.pad(strconv.AppendQuoteRuneToASCII(buf, r))
+ } else {
+ f.pad(strconv.AppendQuoteRune(buf, r))
+ }
+}
+
+// fmt_float formats a float64. It assumes that verb is a valid format specifier
+// for strconv.AppendFloat and therefore fits into a byte.
+func (f *formatInfo) fmt_float(v float64, size int, verb rune, prec int) {
+ // Explicit precision in format specifier overrules default precision.
+ if f.PrecPresent {
+ prec = f.Prec
+ }
+ // Format number, reserving space for leading + sign if needed.
+ num := strconv.AppendFloat(f.intbuf[:1], v, byte(verb), prec, size)
+ if num[1] == '-' || num[1] == '+' {
+ num = num[1:]
+ } else {
+ num[0] = '+'
+ }
+ // f.space means to add a leading space instead of a "+" sign unless
+ // the sign is explicitly asked for by f.plus.
+ if f.Space && num[0] == '+' && !f.Plus {
+ num[0] = ' '
+ }
+ // Special handling for infinities and NaN,
+ // which don't look like a number so shouldn't be padded with zeros.
+ if num[1] == 'I' || num[1] == 'N' {
+ oldZero := f.Zero
+ f.Zero = false
+ // Remove sign before NaN if not asked for.
+ if num[1] == 'N' && !f.Space && !f.Plus {
+ num = num[1:]
+ }
+ f.pad(num)
+ f.Zero = oldZero
+ return
+ }
+ // The sharp flag forces printing a decimal point for non-binary formats
+ // and retains trailing zeros, which we may need to restore.
+ if f.Sharp && verb != 'b' {
+ digits := 0
+ switch verb {
+ case 'v', 'g', 'G':
+ digits = prec
+ // If no precision is set explicitly use a precision of 6.
+ if digits == -1 {
+ digits = 6
+ }
+ }
+
+ // Buffer pre-allocated with enough room for
+ // exponent notations of the form "e+123".
+ var tailBuf [5]byte
+ tail := tailBuf[:0]
+
+ hasDecimalPoint := false
+ // Starting from i = 1 to skip sign at num[0].
+ for i := 1; i < len(num); i++ {
+ switch num[i] {
+ case '.':
+ hasDecimalPoint = true
+ case 'e', 'E':
+ tail = append(tail, num[i:]...)
+ num = num[:i]
+ default:
+ digits--
+ }
+ }
+ if !hasDecimalPoint {
+ num = append(num, '.')
+ }
+ for digits > 0 {
+ num = append(num, '0')
+ digits--
+ }
+ num = append(num, tail...)
+ }
+ // We want a sign if asked for and if the sign is not positive.
+ if f.Plus || num[0] != '+' {
+ // If we're zero padding to the left we want the sign before the leading zeros.
+ // Achieve this by writing the sign out and then padding the unsigned number.
+ if f.Zero && f.WidthPresent && f.Width > len(num) {
+ f.buf.WriteByte(num[0])
+ f.writePadding(f.Width - len(num))
+ f.buf.Write(num[1:])
+ return
+ }
+ f.pad(num)
+ return
+ }
+ // No sign to show and the number is positive; just print the unsigned number.
+ f.pad(num[1:])
+}
diff --git a/vendor/golang.org/x/text/message/message.go b/vendor/golang.org/x/text/message/message.go
new file mode 100644
index 0000000000..48d76630ca
--- /dev/null
+++ b/vendor/golang.org/x/text/message/message.go
@@ -0,0 +1,193 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message // import "golang.org/x/text/message"
+
+import (
+ "io"
+ "os"
+
+ // Include features to facilitate generated catalogs.
+ _ "golang.org/x/text/feature/plural"
+
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+ "golang.org/x/text/message/catalog"
+)
+
+// A Printer implements language-specific formatted I/O analogous to the fmt
+// package.
+type Printer struct {
+ // the language
+ tag language.Tag
+
+ toDecimal number.Formatter
+ toScientific number.Formatter
+
+ cat catalog.Catalog
+}
+
+type options struct {
+ cat catalog.Catalog
+ // TODO:
+ // - allow %s to print integers in written form (tables are likely too large
+ // to enable this by default).
+ // - list behavior
+ //
+}
+
+// An Option defines an option of a Printer.
+type Option func(o *options)
+
+// Catalog defines the catalog to be used.
+func Catalog(c catalog.Catalog) Option {
+ return func(o *options) { o.cat = c }
+}
+
+// NewPrinter returns a Printer that formats messages tailored to language t.
+func NewPrinter(t language.Tag, opts ...Option) *Printer {
+ options := &options{
+ cat: DefaultCatalog,
+ }
+ for _, o := range opts {
+ o(options)
+ }
+ p := &Printer{
+ tag: t,
+ cat: options.cat,
+ }
+ p.toDecimal.InitDecimal(t)
+ p.toScientific.InitScientific(t)
+ return p
+}
+
+// Sprint is like fmt.Sprint, but using language-specific formatting.
+func (p *Printer) Sprint(a ...interface{}) string {
+ pp := newPrinter(p)
+ pp.doPrint(a)
+ s := pp.String()
+ pp.free()
+ return s
+}
+
+// Fprint is like fmt.Fprint, but using language-specific formatting.
+func (p *Printer) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ pp := newPrinter(p)
+ pp.doPrint(a)
+ n64, err := io.Copy(w, &pp.Buffer)
+ pp.free()
+ return int(n64), err
+}
+
+// Print is like fmt.Print, but using language-specific formatting.
+func (p *Printer) Print(a ...interface{}) (n int, err error) {
+ return p.Fprint(os.Stdout, a...)
+}
+
+// Sprintln is like fmt.Sprintln, but using language-specific formatting.
+func (p *Printer) Sprintln(a ...interface{}) string {
+ pp := newPrinter(p)
+ pp.doPrintln(a)
+ s := pp.String()
+ pp.free()
+ return s
+}
+
+// Fprintln is like fmt.Fprintln, but using language-specific formatting.
+func (p *Printer) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ pp := newPrinter(p)
+ pp.doPrintln(a)
+ n64, err := io.Copy(w, &pp.Buffer)
+ pp.free()
+ return int(n64), err
+}
+
+// Println is like fmt.Println, but using language-specific formatting.
+func (p *Printer) Println(a ...interface{}) (n int, err error) {
+ return p.Fprintln(os.Stdout, a...)
+}
+
+// Sprintf is like fmt.Sprintf, but using language-specific formatting.
+func (p *Printer) Sprintf(key Reference, a ...interface{}) string {
+ pp := newPrinter(p)
+ lookupAndFormat(pp, key, a)
+ s := pp.String()
+ pp.free()
+ return s
+}
+
+// Fprintf is like fmt.Fprintf, but using language-specific formatting.
+func (p *Printer) Fprintf(w io.Writer, key Reference, a ...interface{}) (n int, err error) {
+ pp := newPrinter(p)
+ lookupAndFormat(pp, key, a)
+ n, err = w.Write(pp.Bytes())
+ pp.free()
+ return n, err
+
+}
+
+// Printf is like fmt.Printf, but using language-specific formatting.
+func (p *Printer) Printf(key Reference, a ...interface{}) (n int, err error) {
+ pp := newPrinter(p)
+ lookupAndFormat(pp, key, a)
+ n, err = os.Stdout.Write(pp.Bytes())
+ pp.free()
+ return n, err
+}
+
+func lookupAndFormat(p *printer, r Reference, a []interface{}) {
+ p.fmt.Reset(a)
+ var id, msg string
+ switch v := r.(type) {
+ case string:
+ id, msg = v, v
+ case key:
+ id, msg = v.id, v.fallback
+ default:
+ panic("key argument is not a Reference")
+ }
+
+ if p.catContext.Execute(id) == catalog.ErrNotFound {
+ if p.catContext.Execute(msg) == catalog.ErrNotFound {
+ p.Render(msg)
+ return
+ }
+ }
+}
+
+type rawPrinter struct {
+ p *printer
+}
+
+func (p rawPrinter) Render(msg string) { p.p.WriteString(msg) }
+func (p rawPrinter) Arg(i int) interface{} { return nil }
+
+// Arg implements catmsg.Renderer.
+func (p *printer) Arg(i int) interface{} { // TODO, also return "ok" bool
+ i--
+ if uint(i) < uint(len(p.fmt.Args)) {
+ return p.fmt.Args[i]
+ }
+ return nil
+}
+
+// Render implements catmsg.Renderer.
+func (p *printer) Render(msg string) {
+ p.doPrintf(msg)
+}
+
+// A Reference is a string or a message reference.
+type Reference interface {
+ // TODO: also allow []string
+}
+
+// Key creates a message Reference for a message where the given id is used for
+// message lookup and the fallback is returned when no matches are found.
+func Key(id string, fallback string) Reference {
+ return key{id, fallback}
+}
+
+type key struct {
+ id, fallback string
+}
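Editor's note: a short usage sketch for the Reference/Key mechanism defined above. Key separates the lookup id from the fallback text rendered when no translation is registered; the ids below are arbitrary examples.

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/text/language"
        "golang.org/x/text/message"
    )

    func main() {
        p := message.NewPrinter(language.English)

        // No translation is registered for "archive(verb)", so the fallback
        // "archive" is rendered; Fprintf writes the result to any io.Writer.
        var buf bytes.Buffer
        p.Fprintf(&buf, message.Key("archive(verb)", "archive"))
        fmt.Println(buf.String()) // archive
    }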
diff --git a/vendor/golang.org/x/text/message/print.go b/vendor/golang.org/x/text/message/print.go
new file mode 100644
index 0000000000..da304cc0ed
--- /dev/null
+++ b/vendor/golang.org/x/text/message/print.go
@@ -0,0 +1,984 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+ "bytes"
+ "fmt" // TODO: consider copying interfaces from package fmt to avoid dependency.
+ "math"
+ "reflect"
+ "sync"
+ "unicode/utf8"
+
+ "golang.org/x/text/internal/format"
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+ "golang.org/x/text/message/catalog"
+)
+
+// Strings for use with buffer.WriteString.
+// This is less overhead than using buffer.Write with byte arrays.
+const (
+ commaSpaceString = ", "
+	nilAngleString      = "<nil>"
+ nilParenString = "(nil)"
+ nilString = "nil"
+ mapString = "map["
+ percentBangString = "%!"
+ missingString = "(MISSING)"
+ badIndexString = "(BADINDEX)"
+ panicString = "(PANIC="
+ extraString = "%!(EXTRA "
+ badWidthString = "%!(BADWIDTH)"
+ badPrecString = "%!(BADPREC)"
+ noVerbString = "%!(NOVERB)"
+
+	invReflectString    = "<invalid reflect.Value>"
+)
+
+var printerPool = sync.Pool{
+ New: func() interface{} { return new(printer) },
+}
+
+// newPrinter allocates a new printer struct or grabs a cached one.
+func newPrinter(pp *Printer) *printer {
+ p := printerPool.Get().(*printer)
+ p.Printer = *pp
+ // TODO: cache most of the following call.
+ p.catContext = pp.cat.Context(pp.tag, p)
+
+ p.panicking = false
+ p.erroring = false
+ p.fmt.init(&p.Buffer)
+ return p
+}
+
+// free saves used printer structs in printerPool; avoids an allocation per invocation.
+func (p *printer) free() {
+ p.Buffer.Reset()
+ p.arg = nil
+ p.value = reflect.Value{}
+ printerPool.Put(p)
+}
+
+// printer is used to store a printer's state.
+// It implements "golang.org/x/text/internal/format".State.
+type printer struct {
+ Printer
+
+ // the context for looking up message translations
+ catContext *catalog.Context
+
+ // buffer for accumulating output.
+ bytes.Buffer
+
+ // arg holds the current item, as an interface{}.
+ arg interface{}
+ // value is used instead of arg for reflect values.
+ value reflect.Value
+
+ // fmt is used to format basic items such as integers or strings.
+ fmt formatInfo
+
+ // panicking is set by catchPanic to avoid infinite panic, recover, panic, ... recursion.
+ panicking bool
+ // erroring is set when printing an error string to guard against calling handleMethods.
+ erroring bool
+}
+
+// Language implements "golang.org/x/text/internal/format".State.
+func (p *printer) Language() language.Tag { return p.tag }
+
+func (p *printer) Width() (wid int, ok bool) { return p.fmt.Width, p.fmt.WidthPresent }
+
+func (p *printer) Precision() (prec int, ok bool) { return p.fmt.Prec, p.fmt.PrecPresent }
+
+func (p *printer) Flag(b int) bool {
+ switch b {
+ case '-':
+ return p.fmt.Minus
+ case '+':
+ return p.fmt.Plus || p.fmt.PlusV
+ case '#':
+ return p.fmt.Sharp || p.fmt.SharpV
+ case ' ':
+ return p.fmt.Space
+ case '0':
+ return p.fmt.Zero
+ }
+ return false
+}
+
+// getField gets the i'th field of the struct value.
+// If the field itself is an interface, return a value for
+// the thing inside the interface, not the interface itself.
+func getField(v reflect.Value, i int) reflect.Value {
+ val := v.Field(i)
+ if val.Kind() == reflect.Interface && !val.IsNil() {
+ val = val.Elem()
+ }
+ return val
+}
+
+func (p *printer) unknownType(v reflect.Value) {
+ if !v.IsValid() {
+ p.WriteString(nilAngleString)
+ return
+ }
+ p.WriteByte('?')
+ p.WriteString(v.Type().String())
+ p.WriteByte('?')
+}
+
+func (p *printer) badVerb(verb rune) {
+ p.erroring = true
+ p.WriteString(percentBangString)
+ p.WriteRune(verb)
+ p.WriteByte('(')
+ switch {
+ case p.arg != nil:
+ p.WriteString(reflect.TypeOf(p.arg).String())
+ p.WriteByte('=')
+ p.printArg(p.arg, 'v')
+ case p.value.IsValid():
+ p.WriteString(p.value.Type().String())
+ p.WriteByte('=')
+ p.printValue(p.value, 'v', 0)
+ default:
+ p.WriteString(nilAngleString)
+ }
+ p.WriteByte(')')
+ p.erroring = false
+}
+
+func (p *printer) fmtBool(v bool, verb rune) {
+ switch verb {
+ case 't', 'v':
+ p.fmt.fmt_boolean(v)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or
+// not, as requested, by temporarily setting the sharp flag.
+func (p *printer) fmt0x64(v uint64, leading0x bool) {
+ sharp := p.fmt.Sharp
+ p.fmt.Sharp = leading0x
+ p.fmt.fmt_integer(v, 16, unsigned, ldigits)
+ p.fmt.Sharp = sharp
+}
+
+// fmtInteger formats a signed or unsigned integer.
+func (p *printer) fmtInteger(v uint64, isSigned bool, verb rune) {
+ switch verb {
+ case 'v':
+ if p.fmt.SharpV && !isSigned {
+ p.fmt0x64(v, true)
+ return
+ }
+ fallthrough
+ case 'd':
+ if p.fmt.Sharp || p.fmt.SharpV {
+ p.fmt.fmt_integer(v, 10, isSigned, ldigits)
+ } else {
+ p.fmtDecimalInt(v, isSigned)
+ }
+ case 'b':
+ p.fmt.fmt_integer(v, 2, isSigned, ldigits)
+ case 'o':
+ p.fmt.fmt_integer(v, 8, isSigned, ldigits)
+ case 'x':
+ p.fmt.fmt_integer(v, 16, isSigned, ldigits)
+ case 'X':
+ p.fmt.fmt_integer(v, 16, isSigned, udigits)
+ case 'c':
+ p.fmt.fmt_c(v)
+ case 'q':
+ if v <= utf8.MaxRune {
+ p.fmt.fmt_qc(v)
+ } else {
+ p.badVerb(verb)
+ }
+ case 'U':
+ p.fmt.fmt_unicode(v)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+// fmtFloat formats a float. The default precision for each verb
+// is specified as last argument in the call to fmt_float.
+func (p *printer) fmtFloat(v float64, size int, verb rune) {
+ switch verb {
+ case 'b':
+ p.fmt.fmt_float(v, size, verb, -1)
+ case 'v':
+ verb = 'g'
+ fallthrough
+ case 'g', 'G':
+ if p.fmt.Sharp || p.fmt.SharpV {
+ p.fmt.fmt_float(v, size, verb, -1)
+ } else {
+ p.fmtVariableFloat(v, size)
+ }
+ case 'e', 'E':
+ if p.fmt.Sharp || p.fmt.SharpV {
+ p.fmt.fmt_float(v, size, verb, 6)
+ } else {
+ p.fmtScientific(v, size, 6)
+ }
+ case 'f', 'F':
+ if p.fmt.Sharp || p.fmt.SharpV {
+ p.fmt.fmt_float(v, size, verb, 6)
+ } else {
+ p.fmtDecimalFloat(v, size, 6)
+ }
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *printer) setFlags(f *number.Formatter) {
+ f.Flags &^= number.ElideSign
+ if p.fmt.Plus || p.fmt.Space {
+ f.Flags |= number.AlwaysSign
+ if !p.fmt.Plus {
+ f.Flags |= number.ElideSign
+ }
+ } else {
+ f.Flags &^= number.AlwaysSign
+ }
+}
+
+func (p *printer) updatePadding(f *number.Formatter) {
+ f.Flags &^= number.PadMask
+ if p.fmt.Minus {
+ f.Flags |= number.PadAfterSuffix
+ } else {
+ f.Flags |= number.PadBeforePrefix
+ }
+ f.PadRune = ' '
+ f.FormatWidth = uint16(p.fmt.Width)
+}
+
+func (p *printer) initDecimal(minFrac, maxFrac int) {
+ f := &p.toDecimal
+ f.MinIntegerDigits = 1
+ f.MaxIntegerDigits = 0
+ f.MinFractionDigits = uint8(minFrac)
+ f.MaxFractionDigits = int16(maxFrac)
+ p.setFlags(f)
+ f.PadRune = 0
+ if p.fmt.WidthPresent {
+ if p.fmt.Zero {
+ wid := p.fmt.Width
+ // Use significant integers for this.
+ // TODO: this is not the same as width, but so be it.
+ if f.MinFractionDigits > 0 {
+ wid -= 1 + int(f.MinFractionDigits)
+ }
+ if p.fmt.Plus || p.fmt.Space {
+ wid--
+ }
+ if wid > 0 && wid > int(f.MinIntegerDigits) {
+ f.MinIntegerDigits = uint8(wid)
+ }
+ }
+ p.updatePadding(f)
+ }
+}
+
+func (p *printer) initScientific(minFrac, maxFrac int) {
+ f := &p.toScientific
+ if maxFrac < 0 {
+ f.SetPrecision(maxFrac)
+ } else {
+ f.SetPrecision(maxFrac + 1)
+ f.MinFractionDigits = uint8(minFrac)
+ f.MaxFractionDigits = int16(maxFrac)
+ }
+ f.MinExponentDigits = 2
+ p.setFlags(f)
+ f.PadRune = 0
+ if p.fmt.WidthPresent {
+ f.Flags &^= number.PadMask
+ if p.fmt.Zero {
+ f.PadRune = f.Digit(0)
+ f.Flags |= number.PadAfterPrefix
+ } else {
+ f.PadRune = ' '
+ f.Flags |= number.PadBeforePrefix
+ }
+ p.updatePadding(f)
+ }
+}
+
+func (p *printer) fmtDecimalInt(v uint64, isSigned bool) {
+ var d number.Decimal
+
+ f := &p.toDecimal
+ if p.fmt.PrecPresent {
+ p.setFlags(f)
+ f.MinIntegerDigits = uint8(p.fmt.Prec)
+ f.MaxIntegerDigits = 0
+ f.MinFractionDigits = 0
+ f.MaxFractionDigits = 0
+ if p.fmt.WidthPresent {
+ p.updatePadding(f)
+ }
+ } else {
+ p.initDecimal(0, 0)
+ }
+ d.ConvertInt(p.toDecimal.RoundingContext, isSigned, v)
+
+ out := p.toDecimal.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+}
+
+func (p *printer) fmtDecimalFloat(v float64, size, prec int) {
+ var d number.Decimal
+ if p.fmt.PrecPresent {
+ prec = p.fmt.Prec
+ }
+ p.initDecimal(prec, prec)
+ d.ConvertFloat(p.toDecimal.RoundingContext, v, size)
+
+ out := p.toDecimal.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+}
+
+func (p *printer) fmtVariableFloat(v float64, size int) {
+ prec := -1
+ if p.fmt.PrecPresent {
+ prec = p.fmt.Prec
+ }
+ var d number.Decimal
+ p.initScientific(0, prec)
+ d.ConvertFloat(p.toScientific.RoundingContext, v, size)
+
+ // Copy logic of 'g' formatting from strconv. It is simplified a bit as
+ // we don't have to mind having prec > len(d.Digits).
+ shortest := prec < 0
+ ePrec := prec
+ if shortest {
+ prec = len(d.Digits)
+ ePrec = 6
+ } else if prec == 0 {
+ prec = 1
+ ePrec = 1
+ }
+ exp := int(d.Exp) - 1
+ if exp < -4 || exp >= ePrec {
+ p.initScientific(0, prec)
+
+ out := p.toScientific.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+ } else {
+ if prec > int(d.Exp) {
+ prec = len(d.Digits)
+ }
+ if prec -= int(d.Exp); prec < 0 {
+ prec = 0
+ }
+ p.initDecimal(0, prec)
+
+ out := p.toDecimal.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+ }
+}
+
+func (p *printer) fmtScientific(v float64, size, prec int) {
+ var d number.Decimal
+ if p.fmt.PrecPresent {
+ prec = p.fmt.Prec
+ }
+ p.initScientific(prec, prec)
+ rc := p.toScientific.RoundingContext
+ d.ConvertFloat(rc, v, size)
+
+ out := p.toScientific.Format([]byte(nil), &d)
+ p.Buffer.Write(out)
+
+}
+
+// fmtComplex formats a complex number v with
+// r = real(v) and j = imag(v) as (r+ji) using
+// fmtFloat for r and j formatting.
+func (p *printer) fmtComplex(v complex128, size int, verb rune) {
+ // Make sure any unsupported verbs are found before the
+ // calls to fmtFloat to not generate an incorrect error string.
+ switch verb {
+ case 'v', 'b', 'g', 'G', 'f', 'F', 'e', 'E':
+ p.WriteByte('(')
+ p.fmtFloat(real(v), size/2, verb)
+ // Imaginary part always has a sign.
+ if math.IsNaN(imag(v)) {
+ // By CLDR's rules, NaNs do not use patterns or signs. As this code
+ // relies on AlwaysSign working for imaginary parts, we need to
+ // manually handle NaNs.
+ f := &p.toScientific
+ p.setFlags(f)
+ p.updatePadding(f)
+ p.setFlags(f)
+ nan := f.Symbol(number.SymNan)
+ extra := 0
+ if w, ok := p.Width(); ok {
+ extra = w - utf8.RuneCountInString(nan) - 1
+ }
+ if f.Flags&number.PadAfterNumber == 0 {
+ for ; extra > 0; extra-- {
+ p.WriteRune(f.PadRune)
+ }
+ }
+ p.WriteString(f.Symbol(number.SymPlusSign))
+ p.WriteString(nan)
+ for ; extra > 0; extra-- {
+ p.WriteRune(f.PadRune)
+ }
+ p.WriteString("i)")
+ return
+ }
+ oldPlus := p.fmt.Plus
+ p.fmt.Plus = true
+ p.fmtFloat(imag(v), size/2, verb)
+ p.WriteString("i)") // TODO: use symbol?
+ p.fmt.Plus = oldPlus
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *printer) fmtString(v string, verb rune) {
+ switch verb {
+ case 'v':
+ if p.fmt.SharpV {
+ p.fmt.fmt_q(v)
+ } else {
+ p.fmt.fmt_s(v)
+ }
+ case 's':
+ p.fmt.fmt_s(v)
+ case 'x':
+ p.fmt.fmt_sx(v, ldigits)
+ case 'X':
+ p.fmt.fmt_sx(v, udigits)
+ case 'q':
+ p.fmt.fmt_q(v)
+ case 'm':
+ ctx := p.cat.Context(p.tag, rawPrinter{p})
+ if ctx.Execute(v) == catalog.ErrNotFound {
+ p.WriteString(v)
+ }
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *printer) fmtBytes(v []byte, verb rune, typeString string) {
+ switch verb {
+ case 'v', 'd':
+ if p.fmt.SharpV {
+ p.WriteString(typeString)
+ if v == nil {
+ p.WriteString(nilParenString)
+ return
+ }
+ p.WriteByte('{')
+ for i, c := range v {
+ if i > 0 {
+ p.WriteString(commaSpaceString)
+ }
+ p.fmt0x64(uint64(c), true)
+ }
+ p.WriteByte('}')
+ } else {
+ p.WriteByte('[')
+ for i, c := range v {
+ if i > 0 {
+ p.WriteByte(' ')
+ }
+ p.fmt.fmt_integer(uint64(c), 10, unsigned, ldigits)
+ }
+ p.WriteByte(']')
+ }
+ case 's':
+ p.fmt.fmt_s(string(v))
+ case 'x':
+ p.fmt.fmt_bx(v, ldigits)
+ case 'X':
+ p.fmt.fmt_bx(v, udigits)
+ case 'q':
+ p.fmt.fmt_q(string(v))
+ default:
+ p.printValue(reflect.ValueOf(v), verb, 0)
+ }
+}
+
+func (p *printer) fmtPointer(value reflect.Value, verb rune) {
+ var u uintptr
+ switch value.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+ u = value.Pointer()
+ default:
+ p.badVerb(verb)
+ return
+ }
+
+ switch verb {
+ case 'v':
+ if p.fmt.SharpV {
+ p.WriteByte('(')
+ p.WriteString(value.Type().String())
+ p.WriteString(")(")
+ if u == 0 {
+ p.WriteString(nilString)
+ } else {
+ p.fmt0x64(uint64(u), true)
+ }
+ p.WriteByte(')')
+ } else {
+ if u == 0 {
+ p.fmt.padString(nilAngleString)
+ } else {
+ p.fmt0x64(uint64(u), !p.fmt.Sharp)
+ }
+ }
+ case 'p':
+ p.fmt0x64(uint64(u), !p.fmt.Sharp)
+ case 'b', 'o', 'd', 'x', 'X':
+ if verb == 'd' {
+ p.fmt.Sharp = true // Print as standard go. TODO: does this make sense?
+ }
+ p.fmtInteger(uint64(u), unsigned, verb)
+ default:
+ p.badVerb(verb)
+ }
+}
+
+func (p *printer) catchPanic(arg interface{}, verb rune) {
+ if err := recover(); err != nil {
+		// If it's a nil pointer, just say "<nil>". The likeliest causes are a
+ // Stringer that fails to guard against nil or a nil pointer for a
+		// value receiver, and in either case, "<nil>" is a nice result.
+ if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() {
+ p.WriteString(nilAngleString)
+ return
+ }
+ // Otherwise print a concise panic message. Most of the time the panic
+ // value will print itself nicely.
+ if p.panicking {
+ // Nested panics; the recursion in printArg cannot succeed.
+ panic(err)
+ }
+
+ oldFlags := p.fmt.Parser
+ // For this output we want default behavior.
+ p.fmt.ClearFlags()
+
+ p.WriteString(percentBangString)
+ p.WriteRune(verb)
+ p.WriteString(panicString)
+ p.panicking = true
+ p.printArg(err, 'v')
+ p.panicking = false
+ p.WriteByte(')')
+
+ p.fmt.Parser = oldFlags
+ }
+}
+
+func (p *printer) handleMethods(verb rune) (handled bool) {
+ if p.erroring {
+ return
+ }
+ // Is it a Formatter?
+ if formatter, ok := p.arg.(format.Formatter); ok {
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ formatter.Format(p, verb)
+ return
+ }
+ if formatter, ok := p.arg.(fmt.Formatter); ok {
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ formatter.Format(p, verb)
+ return
+ }
+
+ // If we're doing Go syntax and the argument knows how to supply it, take care of it now.
+ if p.fmt.SharpV {
+ if stringer, ok := p.arg.(fmt.GoStringer); ok {
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ // Print the result of GoString unadorned.
+ p.fmt.fmt_s(stringer.GoString())
+ return
+ }
+ } else {
+ // If a string is acceptable according to the format, see if
+ // the value satisfies one of the string-valued interfaces.
+ // Println etc. set verb to %v, which is "stringable".
+ switch verb {
+ case 'v', 's', 'x', 'X', 'q':
+ // Is it an error or Stringer?
+ // The duplication in the bodies is necessary:
+ // setting handled and deferring catchPanic
+ // must happen before calling the method.
+ switch v := p.arg.(type) {
+ case error:
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ p.fmtString(v.Error(), verb)
+ return
+
+ case fmt.Stringer:
+ handled = true
+ defer p.catchPanic(p.arg, verb)
+ p.fmtString(v.String(), verb)
+ return
+ }
+ }
+ }
+ return false
+}
+
+func (p *printer) printArg(arg interface{}, verb rune) {
+ p.arg = arg
+ p.value = reflect.Value{}
+
+ if arg == nil {
+ switch verb {
+ case 'T', 'v':
+ p.fmt.padString(nilAngleString)
+ default:
+ p.badVerb(verb)
+ }
+ return
+ }
+
+ // Special processing considerations.
+ // %T (the value's type) and %p (its address) are special; we always do them first.
+ switch verb {
+ case 'T':
+ p.fmt.fmt_s(reflect.TypeOf(arg).String())
+ return
+ case 'p':
+ p.fmtPointer(reflect.ValueOf(arg), 'p')
+ return
+ }
+
+ // Some types can be done without reflection.
+ switch f := arg.(type) {
+ case bool:
+ p.fmtBool(f, verb)
+ case float32:
+ p.fmtFloat(float64(f), 32, verb)
+ case float64:
+ p.fmtFloat(f, 64, verb)
+ case complex64:
+ p.fmtComplex(complex128(f), 64, verb)
+ case complex128:
+ p.fmtComplex(f, 128, verb)
+ case int:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int8:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int16:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int32:
+ p.fmtInteger(uint64(f), signed, verb)
+ case int64:
+ p.fmtInteger(uint64(f), signed, verb)
+ case uint:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint8:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint16:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint32:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case uint64:
+ p.fmtInteger(f, unsigned, verb)
+ case uintptr:
+ p.fmtInteger(uint64(f), unsigned, verb)
+ case string:
+ p.fmtString(f, verb)
+ case []byte:
+ p.fmtBytes(f, verb, "[]byte")
+ case reflect.Value:
+ // Handle extractable values with special methods
+ // since printValue does not handle them at depth 0.
+ if f.IsValid() && f.CanInterface() {
+ p.arg = f.Interface()
+ if p.handleMethods(verb) {
+ return
+ }
+ }
+ p.printValue(f, verb, 0)
+ default:
+ // If the type is not simple, it might have methods.
+ if !p.handleMethods(verb) {
+ // Need to use reflection, since the type had no
+ // interface methods that could be used for formatting.
+ p.printValue(reflect.ValueOf(f), verb, 0)
+ }
+ }
+}
+
+// printValue is similar to printArg but starts with a reflect value, not an interface{} value.
+// It does not handle 'p' and 'T' verbs because these should have been already handled by printArg.
+func (p *printer) printValue(value reflect.Value, verb rune, depth int) {
+ // Handle values with special methods if not already handled by printArg (depth == 0).
+ if depth > 0 && value.IsValid() && value.CanInterface() {
+ p.arg = value.Interface()
+ if p.handleMethods(verb) {
+ return
+ }
+ }
+ p.arg = nil
+ p.value = value
+
+ switch f := value; value.Kind() {
+ case reflect.Invalid:
+ if depth == 0 {
+ p.WriteString(invReflectString)
+ } else {
+ switch verb {
+ case 'v':
+ p.WriteString(nilAngleString)
+ default:
+ p.badVerb(verb)
+ }
+ }
+ case reflect.Bool:
+ p.fmtBool(f.Bool(), verb)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p.fmtInteger(uint64(f.Int()), signed, verb)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p.fmtInteger(f.Uint(), unsigned, verb)
+ case reflect.Float32:
+ p.fmtFloat(f.Float(), 32, verb)
+ case reflect.Float64:
+ p.fmtFloat(f.Float(), 64, verb)
+ case reflect.Complex64:
+ p.fmtComplex(f.Complex(), 64, verb)
+ case reflect.Complex128:
+ p.fmtComplex(f.Complex(), 128, verb)
+ case reflect.String:
+ p.fmtString(f.String(), verb)
+ case reflect.Map:
+ if p.fmt.SharpV {
+ p.WriteString(f.Type().String())
+ if f.IsNil() {
+ p.WriteString(nilParenString)
+ return
+ }
+ p.WriteByte('{')
+ } else {
+ p.WriteString(mapString)
+ }
+ keys := f.MapKeys()
+ for i, key := range keys {
+ if i > 0 {
+ if p.fmt.SharpV {
+ p.WriteString(commaSpaceString)
+ } else {
+ p.WriteByte(' ')
+ }
+ }
+ p.printValue(key, verb, depth+1)
+ p.WriteByte(':')
+ p.printValue(f.MapIndex(key), verb, depth+1)
+ }
+ if p.fmt.SharpV {
+ p.WriteByte('}')
+ } else {
+ p.WriteByte(']')
+ }
+ case reflect.Struct:
+ if p.fmt.SharpV {
+ p.WriteString(f.Type().String())
+ }
+ p.WriteByte('{')
+ for i := 0; i < f.NumField(); i++ {
+ if i > 0 {
+ if p.fmt.SharpV {
+ p.WriteString(commaSpaceString)
+ } else {
+ p.WriteByte(' ')
+ }
+ }
+ if p.fmt.PlusV || p.fmt.SharpV {
+ if name := f.Type().Field(i).Name; name != "" {
+ p.WriteString(name)
+ p.WriteByte(':')
+ }
+ }
+ p.printValue(getField(f, i), verb, depth+1)
+ }
+ p.WriteByte('}')
+ case reflect.Interface:
+ value := f.Elem()
+ if !value.IsValid() {
+ if p.fmt.SharpV {
+ p.WriteString(f.Type().String())
+ p.WriteString(nilParenString)
+ } else {
+ p.WriteString(nilAngleString)
+ }
+ } else {
+ p.printValue(value, verb, depth+1)
+ }
+ case reflect.Array, reflect.Slice:
+ switch verb {
+ case 's', 'q', 'x', 'X':
+ // Handle byte and uint8 slices and arrays special for the above verbs.
+ t := f.Type()
+ if t.Elem().Kind() == reflect.Uint8 {
+ var bytes []byte
+ if f.Kind() == reflect.Slice {
+ bytes = f.Bytes()
+ } else if f.CanAddr() {
+ bytes = f.Slice(0, f.Len()).Bytes()
+ } else {
+ // We have an array, but we cannot Slice() a non-addressable array,
+ // so we build a slice by hand. This is a rare case but it would be nice
+ // if reflection could help a little more.
+ bytes = make([]byte, f.Len())
+ for i := range bytes {
+ bytes[i] = byte(f.Index(i).Uint())
+ }
+ }
+ p.fmtBytes(bytes, verb, t.String())
+ return
+ }
+ }
+ if p.fmt.SharpV {
+ p.WriteString(f.Type().String())
+ if f.Kind() == reflect.Slice && f.IsNil() {
+ p.WriteString(nilParenString)
+ return
+ }
+ p.WriteByte('{')
+ for i := 0; i < f.Len(); i++ {
+ if i > 0 {
+ p.WriteString(commaSpaceString)
+ }
+ p.printValue(f.Index(i), verb, depth+1)
+ }
+ p.WriteByte('}')
+ } else {
+ p.WriteByte('[')
+ for i := 0; i < f.Len(); i++ {
+ if i > 0 {
+ p.WriteByte(' ')
+ }
+ p.printValue(f.Index(i), verb, depth+1)
+ }
+ p.WriteByte(']')
+ }
+ case reflect.Ptr:
+ // pointer to array or slice or struct? ok at top level
+ // but not embedded (avoid loops)
+ if depth == 0 && f.Pointer() != 0 {
+ switch a := f.Elem(); a.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map:
+ p.WriteByte('&')
+ p.printValue(a, verb, depth+1)
+ return
+ }
+ }
+ fallthrough
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ p.fmtPointer(f, verb)
+ default:
+ p.unknownType(f)
+ }
+}
+
+func (p *printer) badArgNum(verb rune) {
+ p.WriteString(percentBangString)
+ p.WriteRune(verb)
+ p.WriteString(badIndexString)
+}
+
+func (p *printer) missingArg(verb rune) {
+ p.WriteString(percentBangString)
+ p.WriteRune(verb)
+ p.WriteString(missingString)
+}
+
+func (p *printer) doPrintf(fmt string) {
+ for p.fmt.Parser.SetFormat(fmt); p.fmt.Scan(); {
+ switch p.fmt.Status {
+ case format.StatusText:
+ p.WriteString(p.fmt.Text())
+ case format.StatusSubstitution:
+ p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb)
+ case format.StatusBadWidthSubstitution:
+ p.WriteString(badWidthString)
+ p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb)
+ case format.StatusBadPrecSubstitution:
+ p.WriteString(badPrecString)
+ p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb)
+ case format.StatusNoVerb:
+ p.WriteString(noVerbString)
+ case format.StatusBadArgNum:
+ p.badArgNum(p.fmt.Verb)
+ case format.StatusMissingArg:
+ p.missingArg(p.fmt.Verb)
+ default:
+ panic("unreachable")
+ }
+ }
+
+ // Check for extra arguments, but only if there was at least one ordered
+ // argument. Note that this behavior is necessarily different from fmt:
+ // different variants of messages may opt to drop some or all of the
+ // arguments.
+ if !p.fmt.Reordered && p.fmt.ArgNum < len(p.fmt.Args) && p.fmt.ArgNum != 0 {
+ p.fmt.ClearFlags()
+ p.WriteString(extraString)
+ for i, arg := range p.fmt.Args[p.fmt.ArgNum:] {
+ if i > 0 {
+ p.WriteString(commaSpaceString)
+ }
+ if arg == nil {
+ p.WriteString(nilAngleString)
+ } else {
+ p.WriteString(reflect.TypeOf(arg).String())
+ p.WriteString("=")
+ p.printArg(arg, 'v')
+ }
+ }
+ p.WriteByte(')')
+ }
+}
+
+func (p *printer) doPrint(a []interface{}) {
+ prevString := false
+ for argNum, arg := range a {
+ isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String
+ // Add a space between two non-string arguments.
+ if argNum > 0 && !isString && !prevString {
+ p.WriteByte(' ')
+ }
+ p.printArg(arg, 'v')
+ prevString = isString
+ }
+}
+
+// doPrintln is like doPrint but always adds a space between arguments
+// and a newline after the last argument.
+func (p *printer) doPrintln(a []interface{}) {
+ for argNum, arg := range a {
+ if argNum > 0 {
+ p.WriteByte(' ')
+ }
+ p.printArg(arg, 'v')
+ }
+ p.WriteByte('\n')
+}
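Editor's note: handleMethods above gives fmt.Formatter, error, and fmt.Stringer values the same treatment they get from fmt, so existing String methods keep working under the localized printer. A small sketch with a hypothetical temperature type (not part of this patch):

    package main

    import (
        "fmt"

        "golang.org/x/text/language"
        "golang.org/x/text/message"
    )

    // temperature implements fmt.Stringer, so %v and %s are routed through
    // String rather than through reflection-based printing.
    type temperature float64

    func (t temperature) String() string { return fmt.Sprintf("%.1f °C", float64(t)) }

    func main() {
        p := message.NewPrinter(language.English)
        fmt.Println(p.Sprintf("current: %v", temperature(21.54))) // current: 21.5 °C
    }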
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
index 155648acb6..1a9f5e7706 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
@@ -2,7 +2,6 @@
reviewers:
- thockin
- - lavalamp
- smarterclayton
- wojtek-t
- deads2k
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
index 1bf6b06d47..1fdd32c4ba 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
@@ -112,8 +112,27 @@ func getItemsPtr(list runtime.Object) (interface{}, error) {
// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates
// the loop.
+//
+// If items passed to fn are retained for different durations, and you want to avoid
+// retaining all items in obj as long as any item is referenced, use EachListItemWithAlloc instead.
func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error {
+ return eachListItem(obj, fn, false)
+}
+
+// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice in obj.
+// It does this by making a shallow copy of non-pointer items in obj.
+//
+// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
+func EachListItemWithAlloc(obj runtime.Object, fn func(runtime.Object) error) error {
+ return eachListItem(obj, fn, true)
+}
+
+// allocNew indicates whether a shallow copy is required when the elements in Object.Items are structs rather than pointers.
+func eachListItem(obj runtime.Object, fn func(runtime.Object) error, allocNew bool) error {
if unstructured, ok := obj.(runtime.Unstructured); ok {
+ if allocNew {
+ return unstructured.EachListItemWithAlloc(fn)
+ }
return unstructured.EachListItem(fn)
}
// TODO: Change to an interface call?
@@ -140,8 +159,19 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error {
for i := 0; i < len; i++ {
raw := items.Index(i)
if takeAddr {
- raw = raw.Addr()
+ if allocNew {
+ // shallow copy to avoid retaining a reference to the original list item
+ itemCopy := reflect.New(raw.Type())
+ // assign to itemCopy and type-assert
+ itemCopy.Elem().Set(raw)
+ // reflect.New will guarantee that itemCopy must be a pointer.
+ raw = itemCopy
+ } else {
+ raw = raw.Addr()
+ }
}
+		// raw must be a pointer or an interface;
+		// allocating a pointer is cheap.
switch item := raw.Interface().(type) {
case *runtime.RawExtension:
if err := fn(item.Object); err != nil {
@@ -166,7 +196,23 @@ func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error {
// ExtractList returns obj's Items element as an array of runtime.Objects.
// Returns an error if obj is not a List type (does not have an Items member).
+//
+// If items in the returned list are retained for different durations, and you want to avoid
+// retaining all items in obj as long as any item is referenced, use ExtractListWithAlloc instead.
func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
+ return extractList(obj, false)
+}
+
+// ExtractListWithAlloc works like ExtractList, but avoids retaining references to the items slice in obj.
+// It does this by making a shallow copy of non-pointer items in obj.
+//
+// If the items in the returned list are not retained, or are retained for the same duration, use ExtractList instead for memory efficiency.
+func ExtractListWithAlloc(obj runtime.Object) ([]runtime.Object, error) {
+ return extractList(obj, true)
+}
+
+// allocNew indicates whether a shallow copy is required when the elements in Object.Items are structs rather than pointers.
+func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) {
itemsPtr, err := GetItemsPtr(obj)
if err != nil {
return nil, err
@@ -176,10 +222,17 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
return nil, err
}
list := make([]runtime.Object, items.Len())
+ if len(list) == 0 {
+ return list, nil
+ }
+ elemType := items.Type().Elem()
+ isRawExtension := elemType == rawExtensionObjectType
+ implementsObject := elemType.Implements(objectType)
for i := range list {
raw := items.Index(i)
- switch item := raw.Interface().(type) {
- case runtime.RawExtension:
+ switch {
+ case isRawExtension:
+ item := raw.Interface().(runtime.RawExtension)
switch {
case item.Object != nil:
list[i] = item.Object
@@ -189,8 +242,18 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
default:
list[i] = nil
}
- case runtime.Object:
- list[i] = item
+ case implementsObject:
+ list[i] = raw.Interface().(runtime.Object)
+ case allocNew:
+ // shallow copy to avoid retaining a reference to the original list item
+ itemCopy := reflect.New(raw.Type())
+ // assign to itemCopy and type-assert
+ itemCopy.Elem().Set(raw)
+ var ok bool
+ // reflect.New will guarantee that itemCopy must be a pointer.
+ if list[i], ok = itemCopy.Interface().(runtime.Object); !ok {
+ return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+ }
default:
var found bool
if list[i], found = raw.Addr().Interface().(runtime.Object); !found {
@@ -201,8 +264,12 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
return list, nil
}
-// objectSliceType is the type of a slice of Objects
-var objectSliceType = reflect.TypeOf([]runtime.Object{})
+var (
+ // objectSliceType is the type of a slice of Objects
+ objectSliceType = reflect.TypeOf([]runtime.Object{})
+ objectType = reflect.TypeOf((*runtime.Object)(nil)).Elem()
+ rawExtensionObjectType = reflect.TypeOf(runtime.RawExtension{})
+)
// LenList returns the length of this list or 0 if it is not a list.
func LenList(list runtime.Object) int {
@@ -237,7 +304,7 @@ func SetList(list runtime.Object, objects []runtime.Object) error {
slice := reflect.MakeSlice(items.Type(), len(objects), len(objects))
for i := range objects {
dest := slice.Index(i)
- if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) {
+ if dest.Type() == rawExtensionObjectType {
dest = dest.FieldByName("Object")
}
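Editor's note: a brief sketch of when the new *WithAlloc variants added above matter. The typed PodList from k8s.io/api/core/v1 is assumed here for illustration and is not part of this patch.

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/meta"
    )

    func main() {
        list := &corev1.PodList{Items: make([]corev1.Pod, 3)}

        // ExtractList returns pointers into list.Items, so retaining any one
        // element keeps the whole backing slice reachable.
        objs, _ := meta.ExtractList(list)

        // ExtractListWithAlloc shallow-copies each non-pointer item first, so a
        // retained element no longer pins the original Items slice.
        copies, _ := meta.ExtractListWithAlloc(list)

        fmt.Println(len(objs), len(copies)) // 3 3
    }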
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
index d1c9f53074..063fd285da 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
@@ -2,7 +2,6 @@
reviewers:
- thockin
- - lavalamp
- smarterclayton
- wojtek-t
- derekwaynecarr
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index 48955dca85..a2cd8015fb 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -425,8 +425,6 @@ message LabelSelector {
// relates the key and values.
message LabelSelectorRequirement {
// key is the label key that the selector applies to.
- // +patchMergeKey=key
- // +patchStrategy=merge
optional string key = 1;
// operator represents a key's relationship to a set of values.
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 352d58ebc2..8a8ff70189 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -995,6 +995,24 @@ const (
// CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules)
// values that can not be handled (e.g. an enumerated string).
CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported"
+ // CauseTypeForbidden is used to report valid (as per formatting rules)
+ // values which would be accepted under some conditions, but which are not
+ // permitted by the current conditions (such as security policy). See
+ // Forbidden().
+ CauseTypeForbidden CauseType = "FieldValueForbidden"
+ // CauseTypeTooLong is used to report that the given value is too long.
+ // This is similar to ErrorTypeInvalid, but the error will not include the
+ // too-long value. See TooLong().
+ CauseTypeTooLong CauseType = "FieldValueTooLong"
+ // CauseTypeTooMany is used to report "too many". This is used to
+ // report that a given list has too many items. This is similar to FieldValueTooLong,
+ // but the error indicates quantity instead of length.
+ CauseTypeTooMany CauseType = "FieldValueTooMany"
+ // CauseTypeInternal is used to report other errors that are not related
+ // to user input. See InternalError().
+ CauseTypeInternal CauseType = "InternalError"
+ // CauseTypeTypeInvalid is for the value did not match the schema type for that field
+ CauseTypeTypeInvalid CauseType = "FieldValueTypeInvalid"
// CauseTypeUnexpectedServerResponse is used to report when the server responded to the client
// without the expected return type. The presence of this cause indicates the error may be
// due to an intervening proxy or the server software malfunctioning.
@@ -1207,9 +1225,7 @@ type LabelSelector struct {
// relates the key and values.
type LabelSelectorRequirement struct {
// key is the label key that the selector applies to.
- // +patchMergeKey=key
- // +patchStrategy=merge
- Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// operator represents a key's relationship to a set of values.
// Valid operators are In, NotIn, Exists and DoesNotExist.
Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
index a499eee8eb..40d289f375 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
@@ -101,6 +101,11 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error {
return nil
}
+func (obj *Unstructured) EachListItemWithAlloc(fn func(runtime.Object) error) error {
+	// EachListItem has allocated a new Object for the user, so we can use it directly.
+ return obj.EachListItem(fn)
+}
+
func (obj *Unstructured) UnstructuredContent() map[string]interface{} {
if obj.Object == nil {
return make(map[string]interface{})
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
index 5028f5fb57..82beda2a29 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
@@ -52,6 +52,15 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error {
return nil
}
+func (u *UnstructuredList) EachListItemWithAlloc(fn func(runtime.Object) error) error {
+ for i := range u.Items {
+ if err := fn(&Unstructured{Object: u.Items[i].Object}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured {
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
index 7fc513dd0e..73f85286c2 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
@@ -45,7 +45,6 @@ func NewCodec(e Encoder, d Decoder) Codec {
// Encode is a convenience wrapper for encoding to a []byte from an Encoder
func Encode(e Encoder, obj Object) ([]byte, error) {
- // TODO: reuse buffer
buf := &bytes.Buffer{}
if err := e.Encode(obj, buf); err != nil {
return nil, err
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
index 710a977952..e89ea89391 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
@@ -365,4 +365,9 @@ type Unstructured interface {
// error should terminate the iteration. If IsList() returns false, this method should return an error
// instead of calling the provided function.
EachListItem(func(Object) error) error
+ // EachListItemWithAlloc works like EachListItem, but avoids retaining references to a slice of items.
+ // It does this by making a shallow copy of non-pointer items before passing them to fn.
+ //
+ // If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
+ EachListItemWithAlloc(func(Object) error) error
}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
index 54ccb7a74c..d1c37c9429 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
@@ -39,7 +39,7 @@ func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) {
// ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com`
// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended
// but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then
-// `*GroupVersionResource` is nil.
+// `*GroupVersionKind` is nil.
// `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind`
func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) {
var gvk *GroupVersionKind
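Editor's note: a hedged sketch of the two results ParseKindArg describes, based on the documented behaviour of this function rather than verified output.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    func main() {
        // Three segments: both readings are possible, so gvk is non-nil.
        gvk, gk := schema.ParseKindArg("StatefulSet.v1.apps")
        // gvk: Kind=StatefulSet, Version=v1, Group=apps  (Kind.version.group reading)
        // gk:  Kind=StatefulSet, Group=v1.apps            (Kind.group reading)
        fmt.Println(gvk, gk)

        // Two segments: the Kind.version.group reading is impossible,
        // so gvk is nil and only the GroupKind is meaningful.
        gvk, gk = schema.ParseKindArg("StatefulSet.apps")
        fmt.Println(gvk, gk)
    }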
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/splice.go b/vendor/k8s.io/apimachinery/pkg/runtime/splice.go
new file mode 100644
index 0000000000..2badb7b97f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/splice.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+ "bytes"
+ "io"
+)
+
+// Splice is the interface that wraps the Splice method.
+//
+// Splice moves data from given slice without copying the underlying data for
+// efficiency purpose. Therefore, the caller should make sure the underlying
+// data is not changed later.
+type Splice interface {
+ Splice([]byte)
+ io.Writer
+ Reset()
+ Bytes() []byte
+}
+
+// A spliceBuffer implements Splice and io.Writer interfaces.
+type spliceBuffer struct {
+ raw []byte
+ buf *bytes.Buffer
+}
+
+func NewSpliceBuffer() Splice {
+ return &spliceBuffer{}
+}
+
+// Splice implements the Splice interface.
+func (sb *spliceBuffer) Splice(raw []byte) {
+ sb.raw = raw
+}
+
+// Write implements the io.Writer interface.
+func (sb *spliceBuffer) Write(p []byte) (n int, err error) {
+ if sb.buf == nil {
+ sb.buf = &bytes.Buffer{}
+ }
+ return sb.buf.Write(p)
+}
+
+// Reset resets the buffer to be empty.
+func (sb *spliceBuffer) Reset() {
+ if sb.buf != nil {
+ sb.buf.Reset()
+ }
+ sb.raw = nil
+}
+
+// Bytes returns the data held by the buffer.
+func (sb *spliceBuffer) Bytes() []byte {
+ if sb.buf != nil && len(sb.buf.Bytes()) > 0 {
+ return sb.buf.Bytes()
+ }
+ if sb.raw != nil {
+ return sb.raw
+ }
+ return []byte{}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go
index 0d2f153bf9..1396274c7b 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go
@@ -40,6 +40,13 @@ func NewExpiringWithClock(clock clock.Clock) *Expiring {
// Expiring is a map whose entries expire after a per-entry timeout.
type Expiring struct {
+ // AllowExpiredGet causes the expiration check to be skipped on Get.
+ // It should only be used when a key always corresponds to the exact same value.
+ // Thus when this field is true, expired keys are considered valid
+ // until the next call to Set (which causes the GC to run).
+ // It may not be changed concurrently with calls to Get.
+ AllowExpiredGet bool
+
clock clock.Clock
// mu protects the below fields
@@ -70,7 +77,10 @@ func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) {
c.mu.RLock()
defer c.mu.RUnlock()
e, ok := c.cache[key]
- if !ok || !c.clock.Now().Before(e.expiry) {
+ if !ok {
+ return nil, false
+ }
+ if !c.AllowExpiredGet && !c.clock.Now().Before(e.expiry) {
return nil, false
}
return e.val, true
diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go
index ec4002e38a..fc03018449 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go
@@ -23,34 +23,20 @@ import (
"strings"
"text/tabwriter"
-	"github.com/davecgh/go-spew/spew"
	"github.com/google/go-cmp/cmp"
+ "k8s.io/apimachinery/pkg/util/dump"
)
-// StringDiff diffs a and b and returns a human readable diff.
-func StringDiff(a, b string) string {
- ba := []byte(a)
- bb := []byte(b)
- out := []byte{}
- i := 0
- for ; i < len(ba) && i < len(bb); i++ {
- if ba[i] != bb[i] {
- break
- }
- out = append(out, ba[i])
- }
- out = append(out, []byte("\n\nA: ")...)
- out = append(out, ba[i:]...)
- out = append(out, []byte("\n\nB: ")...)
- out = append(out, bb[i:]...)
- out = append(out, []byte("\n\n")...)
- return string(out)
-}
-
func legacyDiff(a, b interface{}) string {
return cmp.Diff(a, b)
}
+// StringDiff diffs a and b and returns a human readable diff.
+// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
+func StringDiff(a, b string) string {
+ return legacyDiff(a, b)
+}
+
// ObjectDiff prints the diff of two go objects and fails if the objects
// contain unhandled unexported fields.
// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
@@ -75,13 +61,8 @@ func ObjectReflectDiff(a, b interface{}) string {
// ObjectGoPrintSideBySide prints a and b as textual dumps side by side,
// enabling easy visual scanning for mismatches.
func ObjectGoPrintSideBySide(a, b interface{}) string {
- s := spew.ConfigState{
- Indent: " ",
- // Extra deep spew.
- DisableMethods: true,
- }
- sA := s.Sdump(a)
- sB := s.Sdump(b)
+ sA := dump.Pretty(a)
+ sB := dump.Pretty(b)
linesA := strings.Split(sA, "\n")
linesB := strings.Split(sB, "\n")
diff --git a/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go b/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go
new file mode 100644
index 0000000000..cf61ef76ae
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dump
+
+import (
+	"github.com/davecgh/go-spew/spew"
+)
+
+var prettyPrintConfig = &spew.ConfigState{
+ Indent: " ",
+ DisableMethods: true,
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+}
+
+// The config MUST NOT be changed because that could change the result of a hash operation
+var prettyPrintConfigForHash = &spew.ConfigState{
+ Indent: " ",
+ SortKeys: true,
+ DisableMethods: true,
+ SpewKeys: true,
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+}
+
+// Pretty wrap the spew.Sdump with Indent, and disabled methods like error() and String()
+// The output may change over time, so for guaranteed output please take more direct control
+func Pretty(a interface{}) string {
+ return prettyPrintConfig.Sdump(a)
+}
+
+// ForHash keeps the original Spew.Sprintf format to ensure the same checksum
+func ForHash(a interface{}) string {
+ return prettyPrintConfigForHash.Sprintf("%#v", a)
+}
+
+// OneLine outputs the object in one line
+func OneLine(a interface{}) string {
+ return prettyPrintConfig.Sprintf("%#v", a)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
index 27c3d2d564..7fe52ee568 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -23,7 +23,7 @@ import (
"encoding/base64"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"net/http/httputil"
@@ -337,7 +337,7 @@ func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connec
if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
defer resp.Body.Close()
responseError := ""
- responseErrorBytes, err := ioutil.ReadAll(resp.Body)
+ responseErrorBytes, err := io.ReadAll(resp.Body)
if err != nil {
responseError = "unable to read error from server response"
} else {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index 5e80097045..0ea88156be 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -54,7 +54,7 @@ const (
// FromInt creates an IntOrString object with an int32 value. It is
// your responsibility not to call this method with a value greater
// than int32.
-// TODO: convert to (val int32)
+// Deprecated: use FromInt32 instead.
func FromInt(val int) IntOrString {
if val > math.MaxInt32 || val < math.MinInt32 {
klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
@@ -62,6 +62,11 @@ func FromInt(val int) IntOrString {
return IntOrString{Type: Int, IntVal: int32(val)}
}
+// FromInt32 creates an IntOrString object with an int32 value.
+func FromInt32(val int32) IntOrString {
+ return IntOrString{Type: Int, IntVal: val}
+}
+
// FromString creates an IntOrString object with a string value.
func FromString(val string) IntOrString {
return IntOrString{Type: String, StrVal: val}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
index f3111d4bc7..eca04a7116 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
@@ -56,17 +56,20 @@ func NewFieldManager(f Manager, subresource string) *FieldManager {
// newDefaultFieldManager is a helper function which wraps a Manager with certain default logic.
func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager {
return NewFieldManager(
- NewLastAppliedUpdater(
- NewLastAppliedManager(
- NewProbabilisticSkipNonAppliedManager(
- NewCapManagersManager(
- NewBuildManagerInfoManager(
- NewManagedFieldsUpdater(
- NewStripMetaManager(f),
- ), kind.GroupVersion(), subresource,
- ), DefaultMaxUpdateManagers,
- ), objectCreater, kind, DefaultTrackOnCreateProbability,
- ), typeConverter, objectConverter, kind.GroupVersion()),
+ NewVersionCheckManager(
+ NewLastAppliedUpdater(
+ NewLastAppliedManager(
+ NewProbabilisticSkipNonAppliedManager(
+ NewCapManagersManager(
+ NewBuildManagerInfoManager(
+ NewManagedFieldsUpdater(
+ NewStripMetaManager(f),
+ ), kind.GroupVersion(), subresource,
+ ), DefaultMaxUpdateManagers,
+ ), objectCreater, DefaultTrackOnCreateProbability,
+ ), typeConverter, objectConverter, kind.GroupVersion(),
+ ),
+ ), kind,
), subresource,
)
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go
index 6b281ec1e5..f24c040edd 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/skipnonapplied.go
@@ -22,13 +22,11 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
)
type skipNonAppliedManager struct {
fieldManager Manager
objectCreater runtime.ObjectCreater
- gvk schema.GroupVersionKind
beforeApplyManagerName string
probability float32
}
@@ -36,17 +34,16 @@ type skipNonAppliedManager struct {
var _ Manager = &skipNonAppliedManager{}
// NewSkipNonAppliedManager creates a new wrapped FieldManager that only starts tracking managers after the first apply.
-func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind) Manager {
- return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, gvk, 0.0)
+func NewSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater) Manager {
+ return NewProbabilisticSkipNonAppliedManager(fieldManager, objectCreater, 0.0)
}
// NewProbabilisticSkipNonAppliedManager creates a new wrapped FieldManager that starts tracking managers after the first apply,
// or starts tracking on create with p probability.
-func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, gvk schema.GroupVersionKind, p float32) Manager {
+func NewProbabilisticSkipNonAppliedManager(fieldManager Manager, objectCreater runtime.ObjectCreater, p float32) Manager {
return &skipNonAppliedManager{
fieldManager: fieldManager,
objectCreater: objectCreater,
- gvk: gvk,
beforeApplyManagerName: "before-first-apply",
probability: p,
}
@@ -78,9 +75,10 @@ func (f *skipNonAppliedManager) Update(liveObj, newObj runtime.Object, managed M
// Apply implements Manager.
func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
if len(managed.Fields()) == 0 {
- emptyObj, err := f.objectCreater.New(f.gvk)
+ gvk := appliedObj.GetObjectKind().GroupVersionKind()
+ emptyObj, err := f.objectCreater.New(gvk)
if err != nil {
- return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", f.gvk, err)
+ return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", gvk, err)
}
liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName)
if err != nil {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go
new file mode 100644
index 0000000000..ee1e2bca70
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/versioncheck.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type versionCheckManager struct {
+ fieldManager Manager
+ gvk schema.GroupVersionKind
+}
+
+var _ Manager = &versionCheckManager{}
+
+// NewVersionCheckManager creates a manager that makes sure that the
+// applied object is in the proper version.
+func NewVersionCheckManager(fieldManager Manager, gvk schema.GroupVersionKind) Manager {
+ return &versionCheckManager{fieldManager: fieldManager, gvk: gvk}
+}
+
+// Update implements Manager.
+func (f *versionCheckManager) Update(liveObj, newObj runtime.Object, managed Managed, manager string) (runtime.Object, Managed, error) {
+ // Nothing to do for updates, this is checked in many other places.
+ return f.fieldManager.Update(liveObj, newObj, managed, manager)
+}
+
+// Apply implements Manager.
+func (f *versionCheckManager) Apply(liveObj, appliedObj runtime.Object, managed Managed, fieldManager string, force bool) (runtime.Object, Managed, error) {
+ if gvk := appliedObj.GetObjectKind().GroupVersionKind(); gvk != f.gvk {
+ return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid object type: %v", gvk))
+ }
+ return f.fieldManager.Apply(liveObj, appliedObj, managed, fieldManager, force)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
index a20efd1871..25626cf3af 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
@@ -20,7 +20,7 @@ import (
"fmt"
"reflect"
-	"github.com/davecgh/go-spew/spew"
+ "k8s.io/apimachinery/pkg/util/dump"
"sigs.k8s.io/yaml"
)
@@ -76,7 +76,7 @@ func ToYAMLOrError(v interface{}) string {
func toYAML(v interface{}) (string, error) {
y, err := yaml.Marshal(v)
if err != nil {
- return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v))
+ return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, dump.Pretty(v))
}
return string(y), nil
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
index 1c2aba55f7..1635e69a5c 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
@@ -20,6 +20,7 @@ import (
"errors"
"net"
"reflect"
+ "strings"
"syscall"
)
@@ -47,6 +48,11 @@ func IsConnectionReset(err error) bool {
return false
}
+// Returns if the given err is "http2: client connection lost" error.
+func IsHTTP2ConnectionLost(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "http2: client connection lost")
+}
+
// Returns if the given err is "connection refused" error
func IsConnectionRefused(err error) bool {
var errno syscall.Errno
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index 3674914f70..d738725caf 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -126,17 +126,14 @@ type rudimentaryErrorBackoff struct {
// OnError will block if it is called more often than the embedded period time.
// This will prevent overly tight hot error loops.
func (r *rudimentaryErrorBackoff) OnError(error) {
- now := time.Now() // start the timer before acquiring the lock
r.lastErrorTimeLock.Lock()
- d := now.Sub(r.lastErrorTime)
+ defer r.lastErrorTimeLock.Unlock()
+ d := time.Since(r.lastErrorTime)
+ if d < r.minPeriod {
+ // If the time moves backwards for any reason, do nothing
+ time.Sleep(r.minPeriod - d)
+ }
r.lastErrorTime = time.Now()
- r.lastErrorTimeLock.Unlock()
-
- // Do not sleep with the lock held because that causes all callers of HandleError to block.
- // We only want the current goroutine to block.
- // A negative or zero duration causes time.Sleep to return immediately.
- // If the time moves backwards for any reason, do nothing.
- time.Sleep(r.minPeriod - d)
}
// GetCaller returns the caller of the function that calls it.
diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
index 3ee683b997..920c113bbd 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
@@ -1182,7 +1182,13 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, schema Looku
merged = originalFieldValue
case !foundOriginal && foundPatch:
// list was added
- merged = patchFieldValue
+ v, keep := removeDirectives(patchFieldValue)
+ if !keep {
+ // Shouldn't be possible since patchFieldValue is a slice
+ continue
+ }
+
+ merged = v.([]interface{})
case foundOriginal && foundPatch:
merged, err = mergeSliceHandler(originalList, patchList, subschema,
patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions)
@@ -1270,6 +1276,42 @@ func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey
return patch, serverOnly, nil
}
+// Removes directives from an object and returns value to use instead and whether
+// or not the field/index should even be kept
+// May modify input
+func removeDirectives(obj interface{}) (interface{}, bool) {
+ if obj == nil {
+ return obj, true
+ } else if typedV, ok := obj.(map[string]interface{}); ok {
+ if _, hasDirective := typedV[directiveMarker]; hasDirective {
+ return nil, false
+ }
+
+ for k, v := range typedV {
+ var keep bool
+ typedV[k], keep = removeDirectives(v)
+ if !keep {
+ delete(typedV, k)
+ }
+ }
+ return typedV, true
+ } else if typedV, ok := obj.([]interface{}); ok {
+ var res []interface{}
+ if typedV != nil {
+ // Make sure res is non-nil if patch is non-nil
+ res = []interface{}{}
+ }
+ for _, v := range typedV {
+ if newV, keep := removeDirectives(v); keep {
+ res = append(res, newV)
+ }
+ }
+ return res, true
+ } else {
+ return obj, true
+ }
+}
+
// Merge fields from a patch map into the original map. Note: This may modify
// both the original map and the patch because getting a deep copy of a map in
// golang is highly non-trivial.
@@ -1333,7 +1375,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me
if mergeOptions.IgnoreUnmatchedNulls {
discardNullValuesFromPatch(patchV)
}
- original[k] = patchV
+ original[k], ok = removeDirectives(patchV)
+ if !ok {
+ delete(original, k)
+ }
}
continue
}
@@ -1345,7 +1390,10 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me
if mergeOptions.IgnoreUnmatchedNulls {
discardNullValuesFromPatch(patchV)
}
- original[k] = patchV
+ original[k], ok = removeDirectives(patchV)
+ if !ok {
+ delete(original, k)
+ }
}
continue
}
@@ -1372,7 +1420,11 @@ func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, me
}
original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions)
default:
- original[k] = patchV
+ original[k], ok = removeDirectives(patchV)
+ if !ok {
+ // if patchV itself is a directive, then don't keep it
+ delete(original, k)
+ }
}
if err != nil {
return nil, err
@@ -1425,7 +1477,8 @@ func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta,
return nil, err
}
- if fieldPatchStrategy == mergeDirective {
+ // Delete lists are handled the same way regardless of what the field's patch strategy is
+ if fieldPatchStrategy == mergeDirective || isDeleteList {
return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList)
} else {
return typedPatch, nil
diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go
index 8c997ec450..4c61956953 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go
@@ -121,6 +121,11 @@ func MustParseSemantic(str string) *Version {
return v
}
+// MajorMinor returns a version with the provided major and minor version.
+func MajorMinor(major, minor uint) *Version {
+ return &Version{components: []uint{major, minor}}
+}
+
// Major returns the major release number
func (v *Version) Major() uint {
return v.components[0]
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go
index 32e8688ca0..231d4c3842 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go
@@ -38,10 +38,10 @@ func PollUntilContextCancel(ctx context.Context, interval time.Duration, immedia
// a deadline and is equivalent to:
//
// deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout)
-// err := PollUntilContextCancel(ctx, interval, immediate, condition)
+// err := PollUntilContextCancel(deadlineCtx, interval, immediate, condition)
//
// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying
-// inline usage. All other behavior is identical to PollWithContextTimeout.
+// inline usage. All other behavior is identical to PollUntilContextCancel.
func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error {
deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout)
defer deadlineCancel()
@@ -59,7 +59,7 @@ func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duratio
//
// If you want to Poll something forever, see PollInfinite.
//
-// Deprecated: This method does not return errors from context, use PollWithContextTimeout.
+// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
@@ -78,7 +78,7 @@ func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
//
// If you want to Poll something forever, see PollInfinite.
//
-// Deprecated: This method does not return errors from context, use PollWithContextTimeout.
+// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
@@ -91,7 +91,7 @@ func PollWithContext(ctx context.Context, interval, timeout time.Duration, condi
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
//
-// Deprecated: This method does not return errors from context, use PollWithContextCancel.
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
@@ -104,7 +104,7 @@ func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan st
// PollUntilWithContext always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
//
-// Deprecated: This method does not return errors from context, use PollWithContextCancel.
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
@@ -118,7 +118,7 @@ func PollUntilWithContext(ctx context.Context, interval time.Duration, condition
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
-// Deprecated: This method does not return errors from context, use PollWithContextCancel.
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
@@ -132,7 +132,7 @@ func PollInfinite(interval time.Duration, condition ConditionFunc) error {
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
-// Deprecated: This method does not return errors from context, use PollWithContextCancel.
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
@@ -150,7 +150,7 @@ func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condit
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
//
-// Deprecated: This method does not return errors from context, use PollWithContextTimeout.
+// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
@@ -168,7 +168,7 @@ func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) err
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
//
-// Deprecated: This method does not return errors from context, use PollWithContextTimeout.
+// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
@@ -180,7 +180,7 @@ func PollImmediateWithContext(ctx context.Context, interval, timeout time.Durati
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
-// Deprecated: This method does not return errors from context, use PollWithContextCancel.
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
@@ -193,7 +193,7 @@ func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh
// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
//
-// Deprecated: This method does not return errors from context, use PollWithContextCancel.
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
@@ -207,7 +207,7 @@ func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration,
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
-// Deprecated: This method does not return errors from context, use PollWithContextCancel.
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
@@ -222,7 +222,7 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
-// Deprecated: This method does not return errors from context, use PollWithContextCancel.
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
// Note that the new method will no longer return ErrWaitTimeout and instead return errors
// defined by the context package. Will be removed in a future release.
func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
index 1102f65f31..0951cae8a9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
@@ -18,11 +18,18 @@ limitations under the License.
package v1alpha1
+import (
+ v1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use
// with apply.
type ParamRefApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ ParameterNotFoundAction *v1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
}
// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with
@@ -46,3 +53,19 @@ func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyC
b.Namespace = &value
return b
}
+
+// WithSelector sets the Selector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Selector field is set to the value of the last call.
+func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration {
+ b.Selector = value
+ return b
+}
+
+// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call.
+func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1alpha1.ParameterNotFoundActionType) *ParamRefApplyConfiguration {
+ b.ParameterNotFoundAction = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
index f674b5b1ec..7ee320e428 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
@@ -31,6 +31,7 @@ type ValidatingAdmissionPolicySpecApplyConfiguration struct {
FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"`
AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"`
MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ Variables []VariableApplyConfiguration `json:"variables,omitempty"`
}
// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with
@@ -101,3 +102,16 @@ func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(va
}
return b
}
+
+// WithVariables adds the given value to the Variables field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Variables field.
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithVariables")
+ }
+ b.Variables = append(b.Variables, *values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
new file mode 100644
index 0000000000..2c70a8cfb5
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// VariableApplyConfiguration represents an declarative configuration of the Variable type for use
+// with apply.
+type VariableApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Expression *string `json:"expression,omitempty"`
+}
+
+// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with
+// apply.
+func Variable() *VariableApplyConfiguration {
+ return &VariableApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *VariableApplyConfiguration) WithName(value string) *VariableApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithExpression sets the Expression field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Expression field is set to the value of the last call.
+func (b *VariableApplyConfiguration) WithExpression(value string) *VariableApplyConfiguration {
+ b.Expression = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
new file mode 100644
index 0000000000..e92fba0ddb
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// AuditAnnotationApplyConfiguration represents an declarative configuration of the AuditAnnotation type for use
+// with apply.
+type AuditAnnotationApplyConfiguration struct {
+ Key *string `json:"key,omitempty"`
+ ValueExpression *string `json:"valueExpression,omitempty"`
+}
+
+// AuditAnnotationApplyConfiguration constructs an declarative configuration of the AuditAnnotation type for use with
+// apply.
+func AuditAnnotation() *AuditAnnotationApplyConfiguration {
+ return &AuditAnnotationApplyConfiguration{}
+}
+
+// WithKey sets the Key field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Key field is set to the value of the last call.
+func (b *AuditAnnotationApplyConfiguration) WithKey(value string) *AuditAnnotationApplyConfiguration {
+ b.Key = &value
+ return b
+}
+
+// WithValueExpression sets the ValueExpression field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ValueExpression field is set to the value of the last call.
+func (b *AuditAnnotationApplyConfiguration) WithValueExpression(value string) *AuditAnnotationApplyConfiguration {
+ b.ValueExpression = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
new file mode 100644
index 0000000000..059c1b94ba
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// ExpressionWarningApplyConfiguration represents an declarative configuration of the ExpressionWarning type for use
+// with apply.
+type ExpressionWarningApplyConfiguration struct {
+ FieldRef *string `json:"fieldRef,omitempty"`
+ Warning *string `json:"warning,omitempty"`
+}
+
+// ExpressionWarningApplyConfiguration constructs an declarative configuration of the ExpressionWarning type for use with
+// apply.
+func ExpressionWarning() *ExpressionWarningApplyConfiguration {
+ return &ExpressionWarningApplyConfiguration{}
+}
+
+// WithFieldRef sets the FieldRef field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FieldRef field is set to the value of the last call.
+func (b *ExpressionWarningApplyConfiguration) WithFieldRef(value string) *ExpressionWarningApplyConfiguration {
+ b.FieldRef = &value
+ return b
+}
+
+// WithWarning sets the Warning field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Warning field is set to the value of the last call.
+func (b *ExpressionWarningApplyConfiguration) WithWarning(value string) *ExpressionWarningApplyConfiguration {
+ b.Warning = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
new file mode 100644
index 0000000000..25d4139db6
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
@@ -0,0 +1,90 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// MatchResourcesApplyConfiguration represents an declarative configuration of the MatchResources type for use
+// with apply.
+type MatchResourcesApplyConfiguration struct {
+ NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"`
+ ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"`
+ MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"`
+}
+
+// MatchResourcesApplyConfiguration constructs an declarative configuration of the MatchResources type for use with
+// apply.
+func MatchResources() *MatchResourcesApplyConfiguration {
+ return &MatchResourcesApplyConfiguration{}
+}
+
+// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NamespaceSelector field is set to the value of the last call.
+func (b *MatchResourcesApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration {
+ b.NamespaceSelector = value
+ return b
+}
+
+// WithObjectSelector sets the ObjectSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObjectSelector field is set to the value of the last call.
+func (b *MatchResourcesApplyConfiguration) WithObjectSelector(value *v1.LabelSelectorApplyConfiguration) *MatchResourcesApplyConfiguration {
+ b.ObjectSelector = value
+ return b
+}
+
+// WithResourceRules adds the given value to the ResourceRules field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceRules field.
+func (b *MatchResourcesApplyConfiguration) WithResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithResourceRules")
+ }
+ b.ResourceRules = append(b.ResourceRules, *values[i])
+ }
+ return b
+}
+
+// WithExcludeResourceRules adds the given value to the ExcludeResourceRules field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExcludeResourceRules field.
+func (b *MatchResourcesApplyConfiguration) WithExcludeResourceRules(values ...*NamedRuleWithOperationsApplyConfiguration) *MatchResourcesApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithExcludeResourceRules")
+ }
+ b.ExcludeResourceRules = append(b.ExcludeResourceRules, *values[i])
+ }
+ return b
+}
+
+// WithMatchPolicy sets the MatchPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MatchPolicy field is set to the value of the last call.
+func (b *MatchResourcesApplyConfiguration) WithMatchPolicy(value admissionregistrationv1beta1.MatchPolicyType) *MatchResourcesApplyConfiguration {
+ b.MatchPolicy = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
new file mode 100644
index 0000000000..fa346c4a57
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
@@ -0,0 +1,95 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ v1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
+)
+
+// NamedRuleWithOperationsApplyConfiguration represents an declarative configuration of the NamedRuleWithOperations type for use
+// with apply.
+type NamedRuleWithOperationsApplyConfiguration struct {
+ ResourceNames []string `json:"resourceNames,omitempty"`
+ v1.RuleWithOperationsApplyConfiguration `json:",inline"`
+}
+
+// NamedRuleWithOperationsApplyConfiguration constructs an declarative configuration of the NamedRuleWithOperations type for use with
+// apply.
+func NamedRuleWithOperations() *NamedRuleWithOperationsApplyConfiguration {
+ return &NamedRuleWithOperationsApplyConfiguration{}
+}
+
+// WithResourceNames adds the given value to the ResourceNames field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceNames field.
+func (b *NamedRuleWithOperationsApplyConfiguration) WithResourceNames(values ...string) *NamedRuleWithOperationsApplyConfiguration {
+ for i := range values {
+ b.ResourceNames = append(b.ResourceNames, values[i])
+ }
+ return b
+}
+
+// WithOperations adds the given value to the Operations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Operations field.
+func (b *NamedRuleWithOperationsApplyConfiguration) WithOperations(values ...admissionregistrationv1.OperationType) *NamedRuleWithOperationsApplyConfiguration {
+ for i := range values {
+ b.Operations = append(b.Operations, values[i])
+ }
+ return b
+}
+
+// WithAPIGroups adds the given value to the APIGroups field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIGroups field.
+func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIGroups(values ...string) *NamedRuleWithOperationsApplyConfiguration {
+ for i := range values {
+ b.APIGroups = append(b.APIGroups, values[i])
+ }
+ return b
+}
+
+// WithAPIVersions adds the given value to the APIVersions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the APIVersions field.
+func (b *NamedRuleWithOperationsApplyConfiguration) WithAPIVersions(values ...string) *NamedRuleWithOperationsApplyConfiguration {
+ for i := range values {
+ b.APIVersions = append(b.APIVersions, values[i])
+ }
+ return b
+}
+
+// WithResources adds the given value to the Resources field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Resources field.
+func (b *NamedRuleWithOperationsApplyConfiguration) WithResources(values ...string) *NamedRuleWithOperationsApplyConfiguration {
+ for i := range values {
+ b.Resources = append(b.Resources, values[i])
+ }
+ return b
+}
+
+// WithScope sets the Scope field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Scope field is set to the value of the last call.
+func (b *NamedRuleWithOperationsApplyConfiguration) WithScope(value admissionregistrationv1.ScopeType) *NamedRuleWithOperationsApplyConfiguration {
+ b.Scope = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
new file mode 100644
index 0000000000..6050e60251
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// ParamKindApplyConfiguration represents an declarative configuration of the ParamKind type for use
+// with apply.
+type ParamKindApplyConfiguration struct {
+ APIVersion *string `json:"apiVersion,omitempty"`
+ Kind *string `json:"kind,omitempty"`
+}
+
+// ParamKindApplyConfiguration constructs an declarative configuration of the ParamKind type for use with
+// apply.
+func ParamKind() *ParamKindApplyConfiguration {
+ return &ParamKindApplyConfiguration{}
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ParamKindApplyConfiguration) WithAPIVersion(value string) *ParamKindApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ParamKindApplyConfiguration) WithKind(value string) *ParamKindApplyConfiguration {
+ b.Kind = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
new file mode 100644
index 0000000000..2be98dbc52
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ParamRefApplyConfiguration represents an declarative configuration of the ParamRef type for use
+// with apply.
+type ParamRefApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ ParameterNotFoundAction *v1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
+}
+
+// ParamRefApplyConfiguration constructs an declarative configuration of the ParamRef type for use with
+// apply.
+func ParamRef() *ParamRefApplyConfiguration {
+ return &ParamRefApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ParamRefApplyConfiguration) WithName(value string) *ParamRefApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ParamRefApplyConfiguration) WithNamespace(value string) *ParamRefApplyConfiguration {
+ b.Namespace = &value
+ return b
+}
+
+// WithSelector sets the Selector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Selector field is set to the value of the last call.
+func (b *ParamRefApplyConfiguration) WithSelector(value *v1.LabelSelectorApplyConfiguration) *ParamRefApplyConfiguration {
+ b.Selector = value
+ return b
+}
+
+// WithParameterNotFoundAction sets the ParameterNotFoundAction field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ParameterNotFoundAction field is set to the value of the last call.
+func (b *ParamRefApplyConfiguration) WithParameterNotFoundAction(value v1beta1.ParameterNotFoundActionType) *ParamRefApplyConfiguration {
+ b.ParameterNotFoundAction = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
new file mode 100644
index 0000000000..07baf334cd
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// TypeCheckingApplyConfiguration represents an declarative configuration of the TypeChecking type for use
+// with apply.
+type TypeCheckingApplyConfiguration struct {
+ ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"`
+}
+
+// TypeCheckingApplyConfiguration constructs an declarative configuration of the TypeChecking type for use with
+// apply.
+func TypeChecking() *TypeCheckingApplyConfiguration {
+ return &TypeCheckingApplyConfiguration{}
+}
+
+// WithExpressionWarnings adds the given value to the ExpressionWarnings field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ExpressionWarnings field.
+func (b *TypeCheckingApplyConfiguration) WithExpressionWarnings(values ...*ExpressionWarningApplyConfiguration) *TypeCheckingApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithExpressionWarnings")
+ }
+ b.ExpressionWarnings = append(b.ExpressionWarnings, *values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
new file mode 100644
index 0000000000..e144bc9f70
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -0,0 +1,256 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ValidatingAdmissionPolicyApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicy type for use
+// with apply.
+type ValidatingAdmissionPolicyApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ValidatingAdmissionPolicy constructs an declarative configuration of the ValidatingAdmissionPolicy type for use with
+// apply.
+func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b := &ValidatingAdmissionPolicyApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ValidatingAdmissionPolicy")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
+ return b
+}
+
+// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a
+// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// validatingAdmissionPolicy must be an unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
+// ExtractValidatingAdmissionPolicy provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+ return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "")
+}
+
+// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+ return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status")
+}
+
+func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+ b := &ValidatingAdmissionPolicyApplyConfiguration{}
+ err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(validatingAdmissionPolicy.Name)
+
+ b.WithKind("ValidatingAdmissionPolicy")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ValidatingAdmissionPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicySpecApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyApplyConfiguration) WithStatus(value *ValidatingAdmissionPolicyStatusApplyConfiguration) *ValidatingAdmissionPolicyApplyConfiguration {
+ b.Status = value
+ return b
+}
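
The Extract* helpers above are what make the extract/modify-in-place/apply workflow possible with server-side apply. The following sketch is illustrative only: it assumes a kubernetes.Interface clientset (cs) whose typed AdmissionregistrationV1beta1 client exposes Get and Apply in this client-go version, and the policy name, field manager, and CEL expression are placeholders. It also assumes required fields such as matchConstraints are owned by another field manager.

package e2e

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	acadmissionv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
	"k8s.io/client-go/kubernetes"
)

func reapplyPolicy(ctx context.Context, cs kubernetes.Interface) error {
	// Read the live object; Extract only inspects its managedFields entries.
	live, err := cs.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Get(ctx, "demo-policy", metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Recover only the fields previously applied by this field manager.
	ac, err := acadmissionv1beta1.ExtractValidatingAdmissionPolicy(live, "e2e-test-manager")
	if err != nil {
		return err
	}
	// Set the fields this manager should own, then re-apply them under the
	// same field manager so ownership stays with "e2e-test-manager".
	ac.WithSpec(acadmissionv1beta1.ValidatingAdmissionPolicySpec().
		WithValidations(acadmissionv1beta1.Validation().
			WithExpression("object.spec.replicas <= 5").
			WithMessage("replica count is capped at 5")))
	_, err = cs.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().
		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "e2e-test-manager", Force: true})
	return err
}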
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
new file mode 100644
index 0000000000..0dc06aedec
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
@@ -0,0 +1,247 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ValidatingAdmissionPolicyBindingApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBinding type for use
+// with apply.
+type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// ValidatingAdmissionPolicyBinding constructs an declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
+// apply.
+func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
+ b.WithName(name)
+ b.WithKind("ValidatingAdmissionPolicyBinding")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
+ return b
+}
+
+// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a
+// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// validatingAdmissionPolicyBinding must be an unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
+// ExtractValidatingAdmissionPolicyBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "")
+}
+
+// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status")
+}
+
+func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+ b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
+ err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(validatingAdmissionPolicyBinding.Name)
+
+ b.WithKind("ValidatingAdmissionPolicyBinding")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithKind(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAPIVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGenerateName(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithNamespace(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithUID(value types.UID) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithResourceVersion(value string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithGeneration(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithLabels(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithAnnotations(entries map[string]string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithFinalizers(values ...string) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingApplyConfiguration) WithSpec(value *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) *ValidatingAdmissionPolicyBindingApplyConfiguration {
+ b.Spec = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
new file mode 100644
index 0000000000..d20a78efff
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
@@ -0,0 +1,72 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+)
+
+// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
+// with apply.
+type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
+ PolicyName *string `json:"policyName,omitempty"`
+ ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
+ ValidationActions []admissionregistrationv1beta1.ValidationAction `json:"validationActions,omitempty"`
+}
+
+// ValidatingAdmissionPolicyBindingSpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use with
+// apply.
+func ValidatingAdmissionPolicyBindingSpec() *ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
+ return &ValidatingAdmissionPolicyBindingSpecApplyConfiguration{}
+}
+
+// WithPolicyName sets the PolicyName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PolicyName field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithPolicyName(value string) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
+ b.PolicyName = &value
+ return b
+}
+
+// WithParamRef sets the ParamRef field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ParamRef field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithParamRef(value *ParamRefApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
+ b.ParamRef = value
+ return b
+}
+
+// WithMatchResources sets the MatchResources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MatchResources field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithMatchResources(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
+ b.MatchResources = value
+ return b
+}
+
+// WithValidationActions adds the given value to the ValidationActions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ValidationActions field.
+func (b *ValidatingAdmissionPolicyBindingSpecApplyConfiguration) WithValidationActions(values ...admissionregistrationv1beta1.ValidationAction) *ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
+ for i := range values {
+ b.ValidationActions = append(b.ValidationActions, values[i])
+ }
+ return b
+}
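
Taken together, these constructors support a fluent builder style: each With* call sets one field and returns the receiver. The minimal sketch below assembles a binding purely from the apply-configuration constructors added in this vendor drop; the binding, policy, and parameter names are placeholders, and the Deny/DenyAction constants are taken from the k8s.io/api/admissionregistration/v1beta1 package rather than from this diff.

package e2e

import (
	apiadmissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	acadmissionv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
)

// demoBinding builds a ValidatingAdmissionPolicyBinding apply configuration
// entirely through the chained With* builders shown above.
func demoBinding() *acadmissionv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration {
	return acadmissionv1beta1.ValidatingAdmissionPolicyBinding("demo-binding").
		WithSpec(acadmissionv1beta1.ValidatingAdmissionPolicyBindingSpec().
			WithPolicyName("demo-policy").
			WithParamRef(acadmissionv1beta1.ParamRef().
				WithName("demo-params").
				WithNamespace("default").
				WithParameterNotFoundAction(apiadmissionv1beta1.DenyAction)).
			WithValidationActions(apiadmissionv1beta1.Deny))
}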
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
new file mode 100644
index 0000000000..c6e9389103
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
@@ -0,0 +1,117 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+)
+
+// ValidatingAdmissionPolicySpecApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicySpec type for use
+// with apply.
+type ValidatingAdmissionPolicySpecApplyConfiguration struct {
+ ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
+ MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
+ Validations []ValidationApplyConfiguration `json:"validations,omitempty"`
+ FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"`
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ Variables []VariableApplyConfiguration `json:"variables,omitempty"`
+}
+
+// ValidatingAdmissionPolicySpecApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicySpec type for use with
+// apply.
+func ValidatingAdmissionPolicySpec() *ValidatingAdmissionPolicySpecApplyConfiguration {
+ return &ValidatingAdmissionPolicySpecApplyConfiguration{}
+}
+
+// WithParamKind sets the ParamKind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ParamKind field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithParamKind(value *ParamKindApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration {
+ b.ParamKind = value
+ return b
+}
+
+// WithMatchConstraints sets the MatchConstraints field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MatchConstraints field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConstraints(value *MatchResourcesApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration {
+ b.MatchConstraints = value
+ return b
+}
+
+// WithValidations adds the given value to the Validations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Validations field.
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithValidations(values ...*ValidationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithValidations")
+ }
+ b.Validations = append(b.Validations, *values[i])
+ }
+ return b
+}
+
+// WithFailurePolicy sets the FailurePolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FailurePolicy field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithFailurePolicy(value admissionregistrationv1beta1.FailurePolicyType) *ValidatingAdmissionPolicySpecApplyConfiguration {
+ b.FailurePolicy = &value
+ return b
+}
+
+// WithAuditAnnotations adds the given value to the AuditAnnotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the AuditAnnotations field.
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithAuditAnnotations(values ...*AuditAnnotationApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithAuditAnnotations")
+ }
+ b.AuditAnnotations = append(b.AuditAnnotations, *values[i])
+ }
+ return b
+}
+
+// WithMatchConditions adds the given value to the MatchConditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MatchConditions field.
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithMatchConditions(values ...*MatchConditionApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithMatchConditions")
+ }
+ b.MatchConditions = append(b.MatchConditions, *values[i])
+ }
+ return b
+}
+
+// WithVariables adds the given value to the Variables field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Variables field.
+func (b *ValidatingAdmissionPolicySpecApplyConfiguration) WithVariables(values ...*VariableApplyConfiguration) *ValidatingAdmissionPolicySpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithVariables")
+ }
+ b.Variables = append(b.Variables, *values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
new file mode 100644
index 0000000000..e3e6d417ed
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
@@ -0,0 +1,66 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ValidatingAdmissionPolicyStatusApplyConfiguration represents an declarative configuration of the ValidatingAdmissionPolicyStatus type for use
+// with apply.
+type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"`
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// ValidatingAdmissionPolicyStatusApplyConfiguration constructs an declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
+// apply.
+func ValidatingAdmissionPolicyStatus() *ValidatingAdmissionPolicyStatusApplyConfiguration {
+ return &ValidatingAdmissionPolicyStatusApplyConfiguration{}
+}
+
+// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ObservedGeneration field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithObservedGeneration(value int64) *ValidatingAdmissionPolicyStatusApplyConfiguration {
+ b.ObservedGeneration = &value
+ return b
+}
+
+// WithTypeChecking sets the TypeChecking field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TypeChecking field is set to the value of the last call.
+func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithTypeChecking(value *TypeCheckingApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration {
+ b.TypeChecking = value
+ return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *ValidatingAdmissionPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ValidatingAdmissionPolicyStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
new file mode 100644
index 0000000000..ed9ff1ac0c
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
@@ -0,0 +1,70 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ValidationApplyConfiguration represents an declarative configuration of the Validation type for use
+// with apply.
+type ValidationApplyConfiguration struct {
+ Expression *string `json:"expression,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Reason *v1.StatusReason `json:"reason,omitempty"`
+ MessageExpression *string `json:"messageExpression,omitempty"`
+}
+
+// ValidationApplyConfiguration constructs an declarative configuration of the Validation type for use with
+// apply.
+func Validation() *ValidationApplyConfiguration {
+ return &ValidationApplyConfiguration{}
+}
+
+// WithExpression sets the Expression field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Expression field is set to the value of the last call.
+func (b *ValidationApplyConfiguration) WithExpression(value string) *ValidationApplyConfiguration {
+ b.Expression = &value
+ return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *ValidationApplyConfiguration) WithMessage(value string) *ValidationApplyConfiguration {
+ b.Message = &value
+ return b
+}
+
+// WithReason sets the Reason field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Reason field is set to the value of the last call.
+func (b *ValidationApplyConfiguration) WithReason(value v1.StatusReason) *ValidationApplyConfiguration {
+ b.Reason = &value
+ return b
+}
+
+// WithMessageExpression sets the MessageExpression field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MessageExpression field is set to the value of the last call.
+func (b *ValidationApplyConfiguration) WithMessageExpression(value string) *ValidationApplyConfiguration {
+ b.MessageExpression = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
new file mode 100644
index 0000000000..0fc294c65d
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// VariableApplyConfiguration represents an declarative configuration of the Variable type for use
+// with apply.
+type VariableApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ Expression *string `json:"expression,omitempty"`
+}
+
+// VariableApplyConfiguration constructs an declarative configuration of the Variable type for use with
+// apply.
+func Variable() *VariableApplyConfiguration {
+ return &VariableApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *VariableApplyConfiguration) WithName(value string) *VariableApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithExpression sets the Expression field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Expression field is set to the value of the last call.
+func (b *VariableApplyConfiguration) WithExpression(value string) *VariableApplyConfiguration {
+ b.Expression = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
index d36f7603c7..81c56330bb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
@@ -24,6 +24,7 @@ type ServerStorageVersionApplyConfiguration struct {
APIServerID *string `json:"apiServerID,omitempty"`
EncodingVersion *string `json:"encodingVersion,omitempty"`
DecodableVersions []string `json:"decodableVersions,omitempty"`
+ ServedVersions []string `json:"servedVersions,omitempty"`
}
// ServerStorageVersionApplyConfiguration constructs an declarative configuration of the ServerStorageVersion type for use with
@@ -57,3 +58,13 @@ func (b *ServerStorageVersionApplyConfiguration) WithDecodableVersions(values ..
}
return b
}
+
+// WithServedVersions adds the given value to the ServedVersions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ServedVersions field.
+func (b *ServerStorageVersionApplyConfiguration) WithServedVersions(values ...string) *ServerStorageVersionApplyConfiguration {
+ for i := range values {
+ b.ServedVersions = append(b.ServedVersions, values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
index 839d88b64e..3d46a3ecf9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
@@ -32,12 +32,15 @@ type JobSpecApplyConfiguration struct {
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"`
BackoffLimit *int32 `json:"backoffLimit,omitempty"`
+ BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"`
+ MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"`
Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
ManualSelector *bool `json:"manualSelector,omitempty"`
Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
CompletionMode *batchv1.CompletionMode `json:"completionMode,omitempty"`
Suspend *bool `json:"suspend,omitempty"`
+ PodReplacementPolicy *batchv1.PodReplacementPolicy `json:"podReplacementPolicy,omitempty"`
}
// JobSpecApplyConfiguration constructs an declarative configuration of the JobSpec type for use with
@@ -86,6 +89,22 @@ func (b *JobSpecApplyConfiguration) WithBackoffLimit(value int32) *JobSpecApplyC
return b
}
+// WithBackoffLimitPerIndex sets the BackoffLimitPerIndex field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BackoffLimitPerIndex field is set to the value of the last call.
+func (b *JobSpecApplyConfiguration) WithBackoffLimitPerIndex(value int32) *JobSpecApplyConfiguration {
+ b.BackoffLimitPerIndex = &value
+ return b
+}
+
+// WithMaxFailedIndexes sets the MaxFailedIndexes field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MaxFailedIndexes field is set to the value of the last call.
+func (b *JobSpecApplyConfiguration) WithMaxFailedIndexes(value int32) *JobSpecApplyConfiguration {
+ b.MaxFailedIndexes = &value
+ return b
+}
+
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
@@ -133,3 +152,11 @@ func (b *JobSpecApplyConfiguration) WithSuspend(value bool) *JobSpecApplyConfigu
b.Suspend = &value
return b
}
+
+// WithPodReplacementPolicy sets the PodReplacementPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PodReplacementPolicy field is set to the value of the last call.
+func (b *JobSpecApplyConfiguration) WithPodReplacementPolicy(value batchv1.PodReplacementPolicy) *JobSpecApplyConfiguration {
+ b.PodReplacementPolicy = &value
+ return b
+}
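
The new JobSpec setters slot into the same chained style as the existing ones. The sketch below is illustrative: the numeric values are arbitrary, and it assumes the upstream batch/v1 constants (IndexedCompletion, Failed) available in this client-go version; per-index backoff only applies to Indexed jobs, so the completion mode is set alongside it.

package e2e

import (
	batchv1 "k8s.io/api/batch/v1"
	acbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1"
)

// indexedJobSpec shows how the new per-index backoff and pod-replacement
// fields combine with the pre-existing JobSpec builders.
func indexedJobSpec() *acbatchv1.JobSpecApplyConfiguration {
	return acbatchv1.JobSpec().
		WithCompletionMode(batchv1.IndexedCompletion).
		WithCompletions(10).
		WithParallelism(2).
		WithBackoffLimitPerIndex(1).
		WithMaxFailedIndexes(3).
		WithPodReplacementPolicy(batchv1.Failed)
}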
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
index a36d5d0ae1..e8e472f8f7 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
@@ -31,7 +31,9 @@ type JobStatusApplyConfiguration struct {
Active *int32 `json:"active,omitempty"`
Succeeded *int32 `json:"succeeded,omitempty"`
Failed *int32 `json:"failed,omitempty"`
+ Terminating *int32 `json:"terminating,omitempty"`
CompletedIndexes *string `json:"completedIndexes,omitempty"`
+ FailedIndexes *string `json:"failedIndexes,omitempty"`
UncountedTerminatedPods *UncountedTerminatedPodsApplyConfiguration `json:"uncountedTerminatedPods,omitempty"`
Ready *int32 `json:"ready,omitempty"`
}
@@ -95,6 +97,14 @@ func (b *JobStatusApplyConfiguration) WithFailed(value int32) *JobStatusApplyCon
return b
}
+// WithTerminating sets the Terminating field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Terminating field is set to the value of the last call.
+func (b *JobStatusApplyConfiguration) WithTerminating(value int32) *JobStatusApplyConfiguration {
+ b.Terminating = &value
+ return b
+}
+
// WithCompletedIndexes sets the CompletedIndexes field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the CompletedIndexes field is set to the value of the last call.
@@ -103,6 +113,14 @@ func (b *JobStatusApplyConfiguration) WithCompletedIndexes(value string) *JobSta
return b
}
+// WithFailedIndexes sets the FailedIndexes field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the FailedIndexes field is set to the value of the last call.
+func (b *JobStatusApplyConfiguration) WithFailedIndexes(value string) *JobStatusApplyConfiguration {
+ b.FailedIndexes = &value
+ return b
+}
+
// WithUncountedTerminatedPods sets the UncountedTerminatedPods field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the UncountedTerminatedPods field is set to the value of the last call.
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
index 9ada59ee20..32d7156063 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
@@ -35,6 +35,7 @@ type ContainerApplyConfiguration struct {
Env []EnvVarApplyConfiguration `json:"env,omitempty"`
Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"`
+ RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"`
VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"`
LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"`
@@ -160,6 +161,14 @@ func (b *ContainerApplyConfiguration) WithResizePolicy(values ...*ContainerResiz
return b
}
+// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RestartPolicy field is set to the value of the last call.
+func (b *ContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *ContainerApplyConfiguration {
+ b.RestartPolicy = &value
+ return b
+}
+
// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the VolumeMounts field.
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
index c51049ba1f..5fa79a246e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
@@ -139,6 +139,14 @@ func (b *EphemeralContainerApplyConfiguration) WithResizePolicy(values ...*Conta
return b
}
+// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RestartPolicy field is set to the value of the last call.
+func (b *EphemeralContainerApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerApplyConfiguration {
+ b.RestartPolicy = &value
+ return b
+}
+
// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the VolumeMounts field.
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
index 764b830e04..8cded29a9e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
@@ -35,6 +35,7 @@ type EphemeralContainerCommonApplyConfiguration struct {
Env []EnvVarApplyConfiguration `json:"env,omitempty"`
Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"`
+ RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"`
VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"`
LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"`
@@ -160,6 +161,14 @@ func (b *EphemeralContainerCommonApplyConfiguration) WithResizePolicy(values ...
return b
}
+// WithRestartPolicy sets the RestartPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the RestartPolicy field is set to the value of the last call.
+func (b *EphemeralContainerCommonApplyConfiguration) WithRestartPolicy(value corev1.ContainerRestartPolicy) *EphemeralContainerCommonApplyConfiguration {
+ b.RestartPolicy = &value
+ return b
+}
+
// WithVolumeMounts adds the given value to the VolumeMounts field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the VolumeMounts field.
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
new file mode 100644
index 0000000000..c2a42cf747
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// HostIPApplyConfiguration represents an declarative configuration of the HostIP type for use
+// with apply.
+type HostIPApplyConfiguration struct {
+ IP *string `json:"ip,omitempty"`
+}
+
+// HostIPApplyConfiguration constructs an declarative configuration of the HostIP type for use with
+// apply.
+func HostIP() *HostIPApplyConfiguration {
+ return &HostIPApplyConfiguration{}
+}
+
+// WithIP sets the IP field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the IP field is set to the value of the last call.
+func (b *HostIPApplyConfiguration) WithIP(value string) *HostIPApplyConfiguration {
+ b.IP = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
index 4c38d89f57..c29b2a9a15 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
@@ -25,12 +25,12 @@ import (
// PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use
// with apply.
type PersistentVolumeClaimStatusApplyConfiguration struct {
- Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"`
- AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
- Capacity *v1.ResourceList `json:"capacity,omitempty"`
- Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"`
- AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"`
- ResizeStatus *v1.PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty"`
+ Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"`
+ AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ Capacity *v1.ResourceList `json:"capacity,omitempty"`
+ Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"`
+ AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"`
+ AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"`
}
// PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with
@@ -86,10 +86,16 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResources(v
return b
}
-// WithResizeStatus sets the ResizeStatus field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the ResizeStatus field is set to the value of the last call.
-func (b *PersistentVolumeClaimStatusApplyConfiguration) WithResizeStatus(value v1.PersistentVolumeClaimResizeStatus) *PersistentVolumeClaimStatusApplyConfiguration {
- b.ResizeStatus = &value
+// WithAllocatedResourceStatuses puts the entries into the AllocatedResourceStatuses field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the AllocatedResourceStatuses field,
+// overwriting an existing map entries in AllocatedResourceStatuses field with the same key.
+func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceStatuses(entries map[v1.ResourceName]v1.ClaimResourceStatus) *PersistentVolumeClaimStatusApplyConfiguration {
+ if b.AllocatedResourceStatuses == nil && len(entries) > 0 {
+ b.AllocatedResourceStatuses = make(map[v1.ResourceName]v1.ClaimResourceStatus, len(entries))
+ }
+ for k, v := range entries {
+ b.AllocatedResourceStatuses[k] = v
+ }
return b
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
index f7048dec4e..a473c0e927 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
@@ -20,14 +20,16 @@ package v1
import (
v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PersistentVolumeStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeStatus type for use
// with apply.
type PersistentVolumeStatusApplyConfiguration struct {
- Phase *v1.PersistentVolumePhase `json:"phase,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *string `json:"reason,omitempty"`
+ Phase *v1.PersistentVolumePhase `json:"phase,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"`
}
// PersistentVolumeStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeStatus type for use with
@@ -59,3 +61,11 @@ func (b *PersistentVolumeStatusApplyConfiguration) WithReason(value string) *Per
b.Reason = &value
return b
}
+
+// WithLastPhaseTransitionTime sets the LastPhaseTransitionTime field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LastPhaseTransitionTime field is set to the value of the last call.
+func (b *PersistentVolumeStatusApplyConfiguration) WithLastPhaseTransitionTime(value metav1.Time) *PersistentVolumeStatusApplyConfiguration {
+ b.LastPhaseTransitionTime = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
new file mode 100644
index 0000000000..ae79ca01b7
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// PodResourceClaimStatusApplyConfiguration represents an declarative configuration of the PodResourceClaimStatus type for use
+// with apply.
+type PodResourceClaimStatusApplyConfiguration struct {
+ Name *string `json:"name,omitempty"`
+ ResourceClaimName *string `json:"resourceClaimName,omitempty"`
+}
+
+// PodResourceClaimStatusApplyConfiguration constructs an declarative configuration of the PodResourceClaimStatus type for use with
+// apply.
+func PodResourceClaimStatus() *PodResourceClaimStatusApplyConfiguration {
+ return &PodResourceClaimStatusApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *PodResourceClaimStatusApplyConfiguration) WithName(value string) *PodResourceClaimStatusApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithResourceClaimName sets the ResourceClaimName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceClaimName field is set to the value of the last call.
+func (b *PodResourceClaimStatusApplyConfiguration) WithResourceClaimName(value string) *PodResourceClaimStatusApplyConfiguration {
+ b.ResourceClaimName = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
index e9d8e5b28f..1a58ab6be2 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
@@ -26,20 +26,22 @@ import (
// PodStatusApplyConfiguration represents an declarative configuration of the PodStatus type for use
// with apply.
type PodStatusApplyConfiguration struct {
- Phase *v1.PodPhase `json:"phase,omitempty"`
- Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *string `json:"reason,omitempty"`
- NominatedNodeName *string `json:"nominatedNodeName,omitempty"`
- HostIP *string `json:"hostIP,omitempty"`
- PodIP *string `json:"podIP,omitempty"`
- PodIPs []PodIPApplyConfiguration `json:"podIPs,omitempty"`
- StartTime *metav1.Time `json:"startTime,omitempty"`
- InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"`
- ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"`
- QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"`
- EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"`
- Resize *v1.PodResizeStatus `json:"resize,omitempty"`
+ Phase *v1.PodPhase `json:"phase,omitempty"`
+ Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"`
+ Message *string `json:"message,omitempty"`
+ Reason *string `json:"reason,omitempty"`
+ NominatedNodeName *string `json:"nominatedNodeName,omitempty"`
+ HostIP *string `json:"hostIP,omitempty"`
+ HostIPs []HostIPApplyConfiguration `json:"hostIPs,omitempty"`
+ PodIP *string `json:"podIP,omitempty"`
+ PodIPs []PodIPApplyConfiguration `json:"podIPs,omitempty"`
+ StartTime *metav1.Time `json:"startTime,omitempty"`
+ InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"`
+ ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"`
+ QOSClass *v1.PodQOSClass `json:"qosClass,omitempty"`
+ EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"`
+ Resize *v1.PodResizeStatus `json:"resize,omitempty"`
+ ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"`
}
// PodStatusApplyConfiguration constructs an declarative configuration of the PodStatus type for use with
@@ -101,6 +103,19 @@ func (b *PodStatusApplyConfiguration) WithHostIP(value string) *PodStatusApplyCo
return b
}
+// WithHostIPs adds the given value to the HostIPs field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the HostIPs field.
+func (b *PodStatusApplyConfiguration) WithHostIPs(values ...*HostIPApplyConfiguration) *PodStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithHostIPs")
+ }
+ b.HostIPs = append(b.HostIPs, *values[i])
+ }
+ return b
+}
+
// WithPodIP sets the PodIP field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the PodIP field is set to the value of the last call.
@@ -184,3 +199,16 @@ func (b *PodStatusApplyConfiguration) WithResize(value v1.PodResizeStatus) *PodS
b.Resize = &value
return b
}
+
+// WithResourceClaimStatuses adds the given value to the ResourceClaimStatuses field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ResourceClaimStatuses field.
+func (b *PodStatusApplyConfiguration) WithResourceClaimStatuses(values ...*PodResourceClaimStatusApplyConfiguration) *PodStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithResourceClaimStatuses")
+ }
+ b.ResourceClaimStatuses = append(b.ResourceClaimStatuses, *values[i])
+ }
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
index 81c84d2d46..27ea5d9dde 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
@@ -32,8 +32,7 @@ import (
type NetworkPolicyApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
- Status *NetworkPolicyStatusApplyConfiguration `json:"status,omitempty"`
+ Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
}
// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with
@@ -248,11 +247,3 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply
b.Spec = value
return b
}
-
-// WithStatus sets the Status field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Status field is set to the value of the last call.
-func (b *NetworkPolicyApplyConfiguration) WithStatus(value *NetworkPolicyStatusApplyConfiguration) *NetworkPolicyApplyConfiguration {
- b.Status = value
- return b
-}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go
deleted file mode 100644
index 99c89b09b0..0000000000
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicystatus.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by applyconfiguration-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
-)
-
-// NetworkPolicyStatusApplyConfiguration represents an declarative configuration of the NetworkPolicyStatus type for use
-// with apply.
-type NetworkPolicyStatusApplyConfiguration struct {
- Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
-}
-
-// NetworkPolicyStatusApplyConfiguration constructs an declarative configuration of the NetworkPolicyStatus type for use with
-// apply.
-func NetworkPolicyStatus() *NetworkPolicyStatusApplyConfiguration {
- return &NetworkPolicyStatusApplyConfiguration{}
-}
-
-// WithConditions adds the given value to the Conditions field in the declarative configuration
-// and returns the receiver, so that objects can be build by chaining "With" function invocations.
-// If called multiple times, values provided by each call will be appended to the Conditions field.
-func (b *NetworkPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *NetworkPolicyStatusApplyConfiguration {
- for i := range values {
- if values[i] == nil {
- panic("nil value passed to WithConditions")
- }
- b.Conditions = append(b.Conditions, *values[i])
- }
- return b
-}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go
new file mode 100644
index 0000000000..3535d74787
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use
+// with apply.
+type ExemptPriorityLevelConfigurationApplyConfiguration struct {
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
+}
+
+// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with
+// apply.
+func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration {
+ return &ExemptPriorityLevelConfigurationApplyConfiguration{}
+}
+
+// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call.
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration {
+ b.NominalConcurrencyShares = &value
+ return b
+}
+
+// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LendablePercent field is set to the value of the last call.
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration {
+ b.LendablePercent = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go
index 3949dee46d..ade920a755 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go
@@ -27,6 +27,7 @@ import (
type PriorityLevelConfigurationSpecApplyConfiguration struct {
Type *v1alpha1.PriorityLevelEnablement `json:"type,omitempty"`
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
+ Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with
@@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li
b.Limited = value
return b
}
+
+// WithExempt sets the Exempt field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Exempt field is set to the value of the last call.
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration {
+ b.Exempt = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
new file mode 100644
index 0000000000..0710480900
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use
+// with apply.
+type ExemptPriorityLevelConfigurationApplyConfiguration struct {
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
+}
+
+// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with
+// apply.
+func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration {
+ return &ExemptPriorityLevelConfigurationApplyConfiguration{}
+}
+
+// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call.
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration {
+ b.NominalConcurrencyShares = &value
+ return b
+}
+
+// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LendablePercent field is set to the value of the last call.
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration {
+ b.LendablePercent = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
index 8ed4e399f8..19146d9f66 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
@@ -27,6 +27,7 @@ import (
type PriorityLevelConfigurationSpecApplyConfiguration struct {
Type *v1beta1.PriorityLevelEnablement `json:"type,omitempty"`
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
+ Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with
@@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li
b.Limited = value
return b
}
+
+// WithExempt sets the Exempt field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Exempt field is set to the value of the last call.
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration {
+ b.Exempt = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
new file mode 100644
index 0000000000..d6bc330fe7
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta2
+
+// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use
+// with apply.
+type ExemptPriorityLevelConfigurationApplyConfiguration struct {
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
+}
+
+// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with
+// apply.
+func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration {
+ return &ExemptPriorityLevelConfigurationApplyConfiguration{}
+}
+
+// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call.
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration {
+ b.NominalConcurrencyShares = &value
+ return b
+}
+
+// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LendablePercent field is set to the value of the last call.
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration {
+ b.LendablePercent = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
index 5560ed9e56..994a8a16a2 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
@@ -27,6 +27,7 @@ import (
type PriorityLevelConfigurationSpecApplyConfiguration struct {
Type *v1beta2.PriorityLevelEnablement `json:"type,omitempty"`
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
+ Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with
@@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li
b.Limited = value
return b
}
+
+// WithExempt sets the Exempt field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Exempt field is set to the value of the last call.
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration {
+ b.Exempt = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
new file mode 100644
index 0000000000..b03c11d0d9
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta3
+
+// ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use
+// with apply.
+type ExemptPriorityLevelConfigurationApplyConfiguration struct {
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
+}
+
+// ExemptPriorityLevelConfigurationApplyConfiguration constructs an declarative configuration of the ExemptPriorityLevelConfiguration type for use with
+// apply.
+func ExemptPriorityLevelConfiguration() *ExemptPriorityLevelConfigurationApplyConfiguration {
+ return &ExemptPriorityLevelConfigurationApplyConfiguration{}
+}
+
+// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call.
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration {
+ b.NominalConcurrencyShares = &value
+ return b
+}
+
+// WithLendablePercent sets the LendablePercent field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LendablePercent field is set to the value of the last call.
+func (b *ExemptPriorityLevelConfigurationApplyConfiguration) WithLendablePercent(value int32) *ExemptPriorityLevelConfigurationApplyConfiguration {
+ b.LendablePercent = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
index f67f394455..5b0680d912 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
@@ -27,6 +27,7 @@ import (
type PriorityLevelConfigurationSpecApplyConfiguration struct {
Type *v1beta3.PriorityLevelEnablement `json:"type,omitempty"`
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
+ Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
// PriorityLevelConfigurationSpecApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationSpec type for use with
@@ -50,3 +51,11 @@ func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithLimited(value *Li
b.Limited = value
return b
}
+
+// WithExempt sets the Exempt field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Exempt field is set to the value of the last call.
+func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithExempt(value *ExemptPriorityLevelConfigurationApplyConfiguration) *PriorityLevelConfigurationSpecApplyConfiguration {
+ b.Exempt = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
index 361b2f4e85..3ed553662f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
@@ -366,6 +366,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: namespace
type:
scalar: string
+ - name: parameterNotFoundAction
+ type:
+ scalar: string
+ - name: selector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
elementRelationship: atomic
- name: io.k8s.api.admissionregistration.v1alpha1.TypeChecking
map:
@@ -464,6 +470,14 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
namedType: io.k8s.api.admissionregistration.v1alpha1.Validation
elementRelationship: atomic
+ - name: variables
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1alpha1.Variable
+ elementRelationship: associative
+ keys:
+ - name
- name: io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus
map:
fields:
@@ -497,6 +511,39 @@ var schemaYAML = typed.YAMLObject(`types:
- name: reason
type:
scalar: string
+- name: io.k8s.api.admissionregistration.v1alpha1.Variable
+ map:
+ fields:
+ - name: expression
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: io.k8s.api.admissionregistration.v1beta1.AuditAnnotation
+ map:
+ fields:
+ - name: key
+ type:
+ scalar: string
+ default: ""
+ - name: valueExpression
+ type:
+ scalar: string
+ default: ""
+- name: io.k8s.api.admissionregistration.v1beta1.ExpressionWarning
+ map:
+ fields:
+ - name: fieldRef
+ type:
+ scalar: string
+ default: ""
+ - name: warning
+ type:
+ scalar: string
+ default: ""
- name: io.k8s.api.admissionregistration.v1beta1.MatchCondition
map:
fields:
@@ -508,6 +555,31 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+- name: io.k8s.api.admissionregistration.v1beta1.MatchResources
+ map:
+ fields:
+ - name: excludeResourceRules
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations
+ elementRelationship: atomic
+ - name: matchPolicy
+ type:
+ scalar: string
+ - name: namespaceSelector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ - name: objectSelector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ - name: resourceRules
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations
+ elementRelationship: atomic
+ elementRelationship: atomic
- name: io.k8s.api.admissionregistration.v1beta1.MutatingWebhook
map:
fields:
@@ -581,6 +653,69 @@ var schemaYAML = typed.YAMLObject(`types:
elementRelationship: associative
keys:
- name
+- name: io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations
+ map:
+ fields:
+ - name: apiGroups
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: apiVersions
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: operations
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: resourceNames
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: resources
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
+ - name: scope
+ type:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.admissionregistration.v1beta1.ParamKind
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ elementRelationship: atomic
+- name: io.k8s.api.admissionregistration.v1beta1.ParamRef
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ - name: namespace
+ type:
+ scalar: string
+ - name: parameterNotFoundAction
+ type:
+ scalar: string
+ - name: selector
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
+ elementRelationship: atomic
- name: io.k8s.api.admissionregistration.v1beta1.ServiceReference
map:
fields:
@@ -598,6 +733,128 @@ var schemaYAML = typed.YAMLObject(`types:
- name: port
type:
scalar: numeric
+- name: io.k8s.api.admissionregistration.v1beta1.TypeChecking
+ map:
+ fields:
+ - name: expressionWarnings
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1beta1.ExpressionWarning
+ elementRelationship: atomic
+- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec
+ default: {}
+ - name: status
+ type:
+ namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus
+ default: {}
+- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec
+ default: {}
+- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec
+ map:
+ fields:
+ - name: matchResources
+ type:
+ namedType: io.k8s.api.admissionregistration.v1beta1.MatchResources
+ - name: paramRef
+ type:
+ namedType: io.k8s.api.admissionregistration.v1beta1.ParamRef
+ - name: policyName
+ type:
+ scalar: string
+ - name: validationActions
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec
+ map:
+ fields:
+ - name: auditAnnotations
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1beta1.AuditAnnotation
+ elementRelationship: atomic
+ - name: failurePolicy
+ type:
+ scalar: string
+ - name: matchConditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1beta1.MatchCondition
+ elementRelationship: associative
+ keys:
+ - name
+ - name: matchConstraints
+ type:
+ namedType: io.k8s.api.admissionregistration.v1beta1.MatchResources
+ - name: paramKind
+ type:
+ namedType: io.k8s.api.admissionregistration.v1beta1.ParamKind
+ - name: validations
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1beta1.Validation
+ elementRelationship: atomic
+ - name: variables
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.admissionregistration.v1beta1.Variable
+ elementRelationship: associative
+ keys:
+ - name
+- name: io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: observedGeneration
+ type:
+ scalar: numeric
+ - name: typeChecking
+ type:
+ namedType: io.k8s.api.admissionregistration.v1beta1.TypeChecking
- name: io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook
map:
fields:
@@ -668,6 +925,34 @@ var schemaYAML = typed.YAMLObject(`types:
elementRelationship: associative
keys:
- name
+- name: io.k8s.api.admissionregistration.v1beta1.Validation
+ map:
+ fields:
+ - name: expression
+ type:
+ scalar: string
+ default: ""
+ - name: message
+ type:
+ scalar: string
+ - name: messageExpression
+ type:
+ scalar: string
+ - name: reason
+ type:
+ scalar: string
+- name: io.k8s.api.admissionregistration.v1beta1.Variable
+ map:
+ fields:
+ - name: expression
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ elementRelationship: atomic
- name: io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig
map:
fields:
@@ -695,6 +980,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: encodingVersion
type:
scalar: string
+ - name: servedVersions
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
- name: io.k8s.api.apiserverinternal.v1alpha1.StorageVersion
map:
fields:
@@ -3328,6 +3619,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: backoffLimit
type:
scalar: numeric
+ - name: backoffLimitPerIndex
+ type:
+ scalar: numeric
- name: completionMode
type:
scalar: string
@@ -3337,12 +3631,18 @@ var schemaYAML = typed.YAMLObject(`types:
- name: manualSelector
type:
scalar: boolean
+ - name: maxFailedIndexes
+ type:
+ scalar: numeric
- name: parallelism
type:
scalar: numeric
- name: podFailurePolicy
type:
namedType: io.k8s.api.batch.v1.PodFailurePolicy
+ - name: podReplacementPolicy
+ type:
+ scalar: string
- name: selector
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
@@ -3377,6 +3677,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: failed
type:
scalar: numeric
+ - name: failedIndexes
+ type:
+ scalar: string
- name: ready
type:
scalar: numeric
@@ -3386,6 +3689,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: succeeded
type:
scalar: numeric
+ - name: terminating
+ type:
+ scalar: numeric
- name: uncountedTerminatedPods
type:
namedType: io.k8s.api.batch.v1.UncountedTerminatedPods
@@ -4306,6 +4612,9 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.core.v1.ResourceRequirements
default: {}
+ - name: restartPolicy
+ type:
+ scalar: string
- name: securityContext
type:
namedType: io.k8s.api.core.v1.SecurityContext
@@ -4723,6 +5032,9 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.core.v1.ResourceRequirements
default: {}
+ - name: restartPolicy
+ type:
+ scalar: string
- name: securityContext
type:
namedType: io.k8s.api.core.v1.SecurityContext
@@ -5053,6 +5365,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: ip
type:
scalar: string
+- name: io.k8s.api.core.v1.HostIP
+ map:
+ fields:
+ - name: ip
+ type:
+ scalar: string
- name: io.k8s.api.core.v1.HostPathVolumeSource
map:
fields:
@@ -5777,6 +6095,12 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
scalar: string
elementRelationship: atomic
+ - name: allocatedResourceStatuses
+ type:
+ map:
+ elementType:
+ scalar: string
+ elementRelationship: separable
- name: allocatedResources
type:
map:
@@ -5798,9 +6122,6 @@ var schemaYAML = typed.YAMLObject(`types:
- name: phase
type:
scalar: string
- - name: resizeStatus
- type:
- scalar: string
- name: io.k8s.api.core.v1.PersistentVolumeClaimTemplate
map:
fields:
@@ -5927,6 +6248,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.core.v1.PersistentVolumeStatus
map:
fields:
+ - name: lastPhaseTransitionTime
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- name: message
type:
scalar: string
@@ -6102,6 +6426,16 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.core.v1.ClaimSource
default: {}
+- name: io.k8s.api.core.v1.PodResourceClaimStatus
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: resourceClaimName
+ type:
+ scalar: string
- name: io.k8s.api.core.v1.PodSchedulingGate
map:
fields:
@@ -6351,6 +6685,12 @@ var schemaYAML = typed.YAMLObject(`types:
- name: hostIP
type:
scalar: string
+ - name: hostIPs
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.HostIP
+ elementRelationship: atomic
- name: initContainerStatuses
type:
list:
@@ -6386,6 +6726,14 @@ var schemaYAML = typed.YAMLObject(`types:
- name: resize
type:
scalar: string
+ - name: resourceClaimStatuses
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.core.v1.PodResourceClaimStatus
+ elementRelationship: associative
+ keys:
+ - name
- name: startTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
@@ -8343,10 +8691,6 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.extensions.v1beta1.NetworkPolicySpec
default: {}
- - name: status
- type:
- namedType: io.k8s.api.extensions.v1beta1.NetworkPolicyStatus
- default: {}
- name: io.k8s.api.extensions.v1beta1.NetworkPolicyEgressRule
map:
fields:
@@ -8426,17 +8770,6 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
scalar: string
elementRelationship: atomic
-- name: io.k8s.api.extensions.v1beta1.NetworkPolicyStatus
- map:
- fields:
- - name: conditions
- type:
- list:
- elementType:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
- elementRelationship: associative
- keys:
- - type
- name: io.k8s.api.extensions.v1beta1.ReplicaSet
map:
fields:
@@ -8546,6 +8879,15 @@ var schemaYAML = typed.YAMLObject(`types:
- name: maxUnavailable
type:
namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString
+- name: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration
+ map:
+ fields:
+ - name: lendablePercent
+ type:
+ scalar: numeric
+ - name: nominalConcurrencyShares
+ type:
+ scalar: numeric
- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod
map:
fields:
@@ -8749,6 +9091,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec
map:
fields:
+ - name: exempt
+ type:
+ namedType: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration
- name: limited
type:
namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration
@@ -8759,6 +9104,8 @@ var schemaYAML = typed.YAMLObject(`types:
unions:
- discriminator: type
fields:
+ - fieldName: exempt
+ discriminatorValue: Exempt
- fieldName: limited
discriminatorValue: Limited
- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus
@@ -8860,6 +9207,15 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+- name: io.k8s.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration
+ map:
+ fields:
+ - name: lendablePercent
+ type:
+ scalar: numeric
+ - name: nominalConcurrencyShares
+ type:
+ scalar: numeric
- name: io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod
map:
fields:
@@ -9063,6 +9419,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec
map:
fields:
+ - name: exempt
+ type:
+ namedType: io.k8s.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration
- name: limited
type:
namedType: io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration
@@ -9073,6 +9432,8 @@ var schemaYAML = typed.YAMLObject(`types:
unions:
- discriminator: type
fields:
+ - fieldName: exempt
+ discriminatorValue: Exempt
- fieldName: limited
discriminatorValue: Limited
- name: io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus
@@ -9174,6 +9535,15 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+- name: io.k8s.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration
+ map:
+ fields:
+ - name: lendablePercent
+ type:
+ scalar: numeric
+ - name: nominalConcurrencyShares
+ type:
+ scalar: numeric
- name: io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod
map:
fields:
@@ -9377,6 +9747,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec
map:
fields:
+ - name: exempt
+ type:
+ namedType: io.k8s.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration
- name: limited
type:
namedType: io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration
@@ -9387,6 +9760,8 @@ var schemaYAML = typed.YAMLObject(`types:
unions:
- discriminator: type
fields:
+ - fieldName: exempt
+ discriminatorValue: Exempt
- fieldName: limited
discriminatorValue: Limited
- name: io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus
@@ -9488,6 +9863,15 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+- name: io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration
+ map:
+ fields:
+ - name: lendablePercent
+ type:
+ scalar: numeric
+ - name: nominalConcurrencyShares
+ type:
+ scalar: numeric
- name: io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod
map:
fields:
@@ -9691,6 +10075,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec
map:
fields:
+ - name: exempt
+ type:
+ namedType: io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration
- name: limited
type:
namedType: io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration
@@ -9701,6 +10088,8 @@ var schemaYAML = typed.YAMLObject(`types:
unions:
- discriminator: type
fields:
+ - fieldName: exempt
+ discriminatorValue: Exempt
- fieldName: limited
discriminatorValue: Limited
- name: io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus
@@ -10087,10 +10476,6 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.networking.v1.NetworkPolicySpec
default: {}
- - name: status
- type:
- namedType: io.k8s.api.networking.v1.NetworkPolicyStatus
- default: {}
- name: io.k8s.api.networking.v1.NetworkPolicyEgressRule
map:
fields:
@@ -10170,17 +10555,6 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
scalar: string
elementRelationship: atomic
-- name: io.k8s.api.networking.v1.NetworkPolicyStatus
- map:
- fields:
- - name: conditions
- type:
- list:
- elementType:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
- elementRelationship: associative
- keys:
- - type
- name: io.k8s.api.networking.v1.ServiceBackendPort
map:
fields:
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
index 101510e45f..409507310b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
@@ -32,8 +32,7 @@ import (
type NetworkPolicyApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
- Status *NetworkPolicyStatusApplyConfiguration `json:"status,omitempty"`
+ Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
}
// NetworkPolicy constructs an declarative configuration of the NetworkPolicy type for use with
@@ -248,11 +247,3 @@ func (b *NetworkPolicyApplyConfiguration) WithSpec(value *NetworkPolicySpecApply
b.Spec = value
return b
}
-
-// WithStatus sets the Status field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Status field is set to the value of the last call.
-func (b *NetworkPolicyApplyConfiguration) WithStatus(value *NetworkPolicyStatusApplyConfiguration) *NetworkPolicyApplyConfiguration {
- b.Status = value
- return b
-}
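With the status stanza removed from NetworkPolicyApplyConfiguration, a server-side apply request now carries only metadata and spec. A minimal sketch of what a caller looks like after this change; the package name, clientset, namespace, labels, and field manager are illustrative assumptions, not part of this patch:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	netv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
	"k8s.io/client-go/kubernetes"
)

// applyNetworkPolicy builds a spec-only NetworkPolicy apply configuration and
// submits it with server-side apply; there is no WithStatus call anymore.
func applyNetworkPolicy(ctx context.Context, cs kubernetes.Interface) error {
	np := netv1ac.NetworkPolicy("allow-web", "default").
		WithSpec(netv1ac.NetworkPolicySpec().
			WithPodSelector(metav1ac.LabelSelector().
				WithMatchLabels(map[string]string{"app": "web"})))

	_, err := cs.NetworkingV1().NetworkPolicies("default").
		Apply(ctx, np, metav1.ApplyOptions{FieldManager: "e2e-test", Force: true})
	return err
}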
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go
deleted file mode 100644
index 032de18eda..0000000000
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicystatus.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by applyconfiguration-gen. DO NOT EDIT.
-
-package v1
-
-import (
- v1 "k8s.io/client-go/applyconfigurations/meta/v1"
-)
-
-// NetworkPolicyStatusApplyConfiguration represents an declarative configuration of the NetworkPolicyStatus type for use
-// with apply.
-type NetworkPolicyStatusApplyConfiguration struct {
- Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
-}
-
-// NetworkPolicyStatusApplyConfiguration constructs an declarative configuration of the NetworkPolicyStatus type for use with
-// apply.
-func NetworkPolicyStatus() *NetworkPolicyStatusApplyConfiguration {
- return &NetworkPolicyStatusApplyConfiguration{}
-}
-
-// WithConditions adds the given value to the Conditions field in the declarative configuration
-// and returns the receiver, so that objects can be build by chaining "With" function invocations.
-// If called multiple times, values provided by each call will be appended to the Conditions field.
-func (b *NetworkPolicyStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *NetworkPolicyStatusApplyConfiguration {
- for i := range values {
- if values[i] == nil {
- panic("nil value passed to WithConditions")
- }
- b.Conditions = append(b.Conditions, *values[i])
- }
- return b
-}
diff --git a/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go
index 9143ce00ab..3829b3cc09 100644
--- a/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go
+++ b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go
@@ -22,7 +22,7 @@ import (
"sync"
"syscall"
- openapi_v2 "github.com/google/gnostic/openapiv2"
+ openapi_v2 "github.com/google/gnostic-models/openapiv2"
errorsutil "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go
index 5490b48255..a4f083a1ac 100644
--- a/vendor/k8s.io/client-go/discovery/discovery_client.go
+++ b/vendor/k8s.io/client-go/discovery/discovery_client.go
@@ -30,7 +30,7 @@ import (
//nolint:staticcheck // SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs
"github.com/golang/protobuf/proto"
- openapi_v2 "github.com/google/gnostic/openapiv2"
+ openapi_v2 "github.com/google/gnostic-models/openapiv2"
apidiscovery "k8s.io/api/apidiscovery/v2beta1"
"k8s.io/apimachinery/pkg/api/errors"
@@ -67,9 +67,6 @@ const (
acceptDiscoveryFormats = AcceptV2Beta1 + "," + AcceptV1
)
-// Aggregated discovery content-type GVK.
-var v2Beta1GVK = schema.GroupVersionKind{Group: "apidiscovery.k8s.io", Version: "v2beta1", Kind: "APIGroupDiscoveryList"}
-
// DiscoveryInterface holds the methods that discover server-supported API groups,
// versions and resources.
type DiscoveryInterface interface {
@@ -263,15 +260,16 @@ func (d *DiscoveryClient) downloadLegacy() (
}
var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList
- // Based on the content-type server responded with: aggregated or unaggregated.
- if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK {
+ // Switch on content-type server responded with: aggregated or unaggregated.
+ switch {
+ case isV2Beta1ContentType(responseContentType):
var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList
err = json.Unmarshal(body, &aggregatedDiscovery)
if err != nil {
return nil, nil, nil, err
}
apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery)
- } else {
+ default:
// Default is unaggregated discovery v1.
var v metav1.APIVersions
err = json.Unmarshal(body, &v)
@@ -315,15 +313,16 @@ func (d *DiscoveryClient) downloadAPIs() (
apiGroupList := &metav1.APIGroupList{}
failedGVs := map[schema.GroupVersion]error{}
var resourcesByGV map[schema.GroupVersion]*metav1.APIResourceList
- // Based on the content-type server responded with: aggregated or unaggregated.
- if isGVK, _ := ContentTypeIsGVK(responseContentType, v2Beta1GVK); isGVK {
+ // Switch on content-type server responded with: aggregated or unaggregated.
+ switch {
+ case isV2Beta1ContentType(responseContentType):
var aggregatedDiscovery apidiscovery.APIGroupDiscoveryList
err = json.Unmarshal(body, &aggregatedDiscovery)
if err != nil {
return nil, nil, nil, err
}
apiGroupList, resourcesByGV, failedGVs = SplitGroupsAndResources(aggregatedDiscovery)
- } else {
+ default:
// Default is unaggregated discovery v1.
err = json.Unmarshal(body, apiGroupList)
if err != nil {
@@ -334,29 +333,26 @@ func (d *DiscoveryClient) downloadAPIs() (
return apiGroupList, resourcesByGV, failedGVs, nil
}
-// ContentTypeIsGVK checks of the content-type string is both
-// "application/json" and matches the provided GVK. An error
-// is returned if the content type string is malformed.
+// isV2Beta1ContentType checks if the content-type string is both
+// "application/json" and contains the v2beta1 content-type params.
// NOTE: This function is resilient to the ordering of the
// content-type parameters, as well as parameters added by
// intermediaries such as proxies or gateways. Examples:
//
-// ("application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil)
-// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil)
-// ("application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8", {apidiscovery.k8s.io, v2beta1, APIGroupDiscoveryList}) = (true, nil)
-// ("application/json", any GVK) = (false, nil)
-// ("application/json; charset=UTF-8", any GVK) = (false, nil)
-// ("malformed content type string", any GVK) = (false, error)
-func ContentTypeIsGVK(contentType string, gvk schema.GroupVersionKind) (bool, error) {
+// "application/json; g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList" = true
+// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io" = true
+// "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8" = true
+// "application/json" = false
+// "application/json; charset=UTF-8" = false
+func isV2Beta1ContentType(contentType string) bool {
base, params, err := mime.ParseMediaType(contentType)
if err != nil {
- return false, err
+ return false
}
- gvkMatch := runtime.ContentTypeJSON == base &&
- params["g"] == gvk.Group &&
- params["v"] == gvk.Version &&
- params["as"] == gvk.Kind
- return gvkMatch, nil
+ return runtime.ContentTypeJSON == base &&
+ params["g"] == "apidiscovery.k8s.io" &&
+ params["v"] == "v2beta1" &&
+ params["as"] == "APIGroupDiscoveryList"
}
// ServerGroups returns the supported groups, with information like supported versions and the
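The replacement helper keys purely off the media-type parameters instead of comparing against a caller-supplied GVK, and it swallows parse errors rather than returning them. A standalone sketch of the same mime.ParseMediaType matching, outside client-go and for illustration only:

package example

import "mime"

// matchesAggregatedDiscovery mirrors the check introduced above: the base type
// must be application/json and the g/v/as parameters must identify the
// apidiscovery.k8s.io v2beta1 APIGroupDiscoveryList, independent of parameter
// order or extras such as charset added by proxies.
func matchesAggregatedDiscovery(contentType string) bool {
	base, params, err := mime.ParseMediaType(contentType)
	if err != nil {
		return false
	}
	return base == "application/json" &&
		params["g"] == "apidiscovery.k8s.io" &&
		params["v"] == "v2beta1" &&
		params["as"] == "APIGroupDiscoveryList"
}

For example, "application/json; as=APIGroupDiscoveryList;v=v2beta1;g=apidiscovery.k8s.io;charset=utf-8" matches, while a bare "application/json" does not.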
diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go
index d1e2b61be2..815960df59 100644
--- a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go
+++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go
@@ -26,6 +26,10 @@ import (
type Interface interface {
// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer
+ // ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer.
+ ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer
+ // ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer.
+ ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer
// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer
}
@@ -46,6 +50,16 @@ func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationIn
return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
+// ValidatingAdmissionPolicies returns a ValidatingAdmissionPolicyInformer.
+func (v *version) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInformer {
+ return &validatingAdmissionPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
+// ValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindingInformer.
+func (v *version) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInformer {
+ return &validatingAdmissionPolicyBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
+}
+
// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer {
return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go
new file mode 100644
index 0000000000..d0e9cd64c8
--- /dev/null
+++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -0,0 +1,89 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ time "time"
+
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
+ kubernetes "k8s.io/client-go/kubernetes"
+ v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ValidatingAdmissionPolicyInformer provides access to a shared informer and lister for
+// ValidatingAdmissionPolicies.
+type ValidatingAdmissionPolicyInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta1.ValidatingAdmissionPolicyLister
+}
+
+type validatingAdmissionPolicyInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredValidatingAdmissionPolicyInformer constructs a new informer for ValidatingAdmissionPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Watch(context.TODO(), options)
+ },
+ },
+ &admissionregistrationv1beta1.ValidatingAdmissionPolicy{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *validatingAdmissionPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredValidatingAdmissionPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *validatingAdmissionPolicyInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicy{}, f.defaultInformer)
+}
+
+func (f *validatingAdmissionPolicyInformer) Lister() v1beta1.ValidatingAdmissionPolicyLister {
+ return v1beta1.NewValidatingAdmissionPolicyLister(f.Informer().GetIndexer())
+}
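The generated comments above recommend obtaining informers through a shared factory rather than constructing them directly. A hedged sketch of how test code might consume the new ValidatingAdmissionPolicy informer via the factory; the clientset, resync period, and stop channel are assumed to come from the surrounding setup:

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// listPoliciesFromCache starts a shared informer factory, waits for the
// ValidatingAdmissionPolicy cache to sync, and then reads from the lister
// instead of querying the API server for every lookup.
func listPoliciesFromCache(cs kubernetes.Interface, stopCh <-chan struct{}) (int, error) {
	factory := informers.NewSharedInformerFactory(cs, 30*time.Second)
	lister := factory.Admissionregistration().V1beta1().ValidatingAdmissionPolicies().Lister()

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	policies, err := lister.List(labels.Everything())
	if err != nil {
		return 0, err
	}
	return len(policies), nil
}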
diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
new file mode 100644
index 0000000000..7641e99406
--- /dev/null
+++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
@@ -0,0 +1,89 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ time "time"
+
+ admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
+ kubernetes "k8s.io/client-go/kubernetes"
+ v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// ValidatingAdmissionPolicyBindingInformer provides access to a shared informer and lister for
+// ValidatingAdmissionPolicyBindings.
+type ValidatingAdmissionPolicyBindingInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta1.ValidatingAdmissionPolicyBindingLister
+}
+
+type validatingAdmissionPolicyBindingInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// NewValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredValidatingAdmissionPolicyBindingInformer constructs a new informer for ValidatingAdmissionPolicyBinding type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().Watch(context.TODO(), options)
+ },
+ },
+ &admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *validatingAdmissionPolicyBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredValidatingAdmissionPolicyBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *validatingAdmissionPolicyBindingInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{}, f.defaultInformer)
+}
+
+func (f *validatingAdmissionPolicyBindingInformer) Lister() v1beta1.ValidatingAdmissionPolicyBindingLister {
+ return v1beta1.NewValidatingAdmissionPolicyBindingLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go
index 8e7a7e36de..7dd0ae6353 100644
--- a/vendor/k8s.io/client-go/informers/factory.go
+++ b/vendor/k8s.io/client-go/informers/factory.go
@@ -184,7 +184,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref
return res
}
-// InternalInformerFor returns the SharedIndexInformer for obj using an internal
+// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
@@ -257,7 +257,7 @@ type SharedInformerFactory interface {
// ForResource gives generic access to a shared informer of the matching type.
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
- // InternalInformerFor returns the SharedIndexInformer for obj using an internal
+ // InformerFor returns the SharedIndexInformer for obj using an internal
// client.
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go
index 2b63a8028c..5495239b29 100644
--- a/vendor/k8s.io/client-go/informers/generic.go
+++ b/vendor/k8s.io/client-go/informers/generic.go
@@ -112,6 +112,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
// Group=admissionregistration.k8s.io, Version=v1beta1
case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil
+ case v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingAdmissionPolicies().Informer()}, nil
+ case v1beta1.SchemeGroupVersion.WithResource("validatingadmissionpolicybindings"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingAdmissionPolicyBindings().Informer()}, nil
case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
index 8fda84b1d2..5a0a17d9be 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
@@ -29,6 +29,8 @@ import (
type AdmissionregistrationV1beta1Interface interface {
RESTClient() rest.Interface
MutatingWebhookConfigurationsGetter
+ ValidatingAdmissionPoliciesGetter
+ ValidatingAdmissionPolicyBindingsGetter
ValidatingWebhookConfigurationsGetter
}
@@ -41,6 +43,14 @@ func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() Mut
return newMutatingWebhookConfigurations(c)
}
+func (c *AdmissionregistrationV1beta1Client) ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface {
+ return newValidatingAdmissionPolicies(c)
+}
+
+func (c *AdmissionregistrationV1beta1Client) ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface {
+ return newValidatingAdmissionPolicyBindings(c)
+}
+
func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface {
return newValidatingWebhookConfigurations(c)
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go
index 2aeb9c98ae..56ad611f45 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go
@@ -20,4 +20,8 @@ package v1beta1
type MutatingWebhookConfigurationExpansion interface{}
+type ValidatingAdmissionPolicyExpansion interface{}
+
+type ValidatingAdmissionPolicyBindingExpansion interface{}
+
type ValidatingWebhookConfigurationExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
new file mode 100644
index 0000000000..bea51b587f
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -0,0 +1,243 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+// ValidatingAdmissionPoliciesGetter has a method to return a ValidatingAdmissionPolicyInterface.
+// A group's client should implement this interface.
+type ValidatingAdmissionPoliciesGetter interface {
+ ValidatingAdmissionPolicies() ValidatingAdmissionPolicyInterface
+}
+
+// ValidatingAdmissionPolicyInterface has methods to work with ValidatingAdmissionPolicy resources.
+type ValidatingAdmissionPolicyInterface interface {
+ Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
+ Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
+ UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error)
+ ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error)
+ ValidatingAdmissionPolicyExpansion
+}
+
+// validatingAdmissionPolicies implements ValidatingAdmissionPolicyInterface
+type validatingAdmissionPolicies struct {
+ client rest.Interface
+}
+
+// newValidatingAdmissionPolicies returns a ValidatingAdmissionPolicies
+func newValidatingAdmissionPolicies(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicies {
+ return &validatingAdmissionPolicies{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the validatingAdmissionPolicy, and returns the corresponding validatingAdmissionPolicy object, and an error if there is any.
+func (c *validatingAdmissionPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicy{}
+ err = c.client.Get().
+ Resource("validatingadmissionpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicies that match those selectors.
+func (c *validatingAdmissionPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.ValidatingAdmissionPolicyList{}
+ err = c.client.Get().
+ Resource("validatingadmissionpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicies.
+func (c *validatingAdmissionPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("validatingadmissionpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a validatingAdmissionPolicy and creates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
+func (c *validatingAdmissionPolicies) Create(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicy{}
+ err = c.client.Post().
+ Resource("validatingadmissionpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(validatingAdmissionPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a validatingAdmissionPolicy and updates it. Returns the server's representation of the validatingAdmissionPolicy, and an error, if there is any.
+func (c *validatingAdmissionPolicies) Update(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicy{}
+ err = c.client.Put().
+ Resource("validatingadmissionpolicies").
+ Name(validatingAdmissionPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(validatingAdmissionPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *validatingAdmissionPolicies) UpdateStatus(ctx context.Context, validatingAdmissionPolicy *v1beta1.ValidatingAdmissionPolicy, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicy{}
+ err = c.client.Put().
+ Resource("validatingadmissionpolicies").
+ Name(validatingAdmissionPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(validatingAdmissionPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the validatingAdmissionPolicy and deletes it. Returns an error if one occurs.
+func (c *validatingAdmissionPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("validatingadmissionpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *validatingAdmissionPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("validatingadmissionpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched validatingAdmissionPolicy.
+func (c *validatingAdmissionPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicy{}
+ err = c.client.Patch(pt).
+ Resource("validatingadmissionpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicy.
+func (c *validatingAdmissionPolicies) Apply(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
+ if validatingAdmissionPolicy == nil {
+ return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(validatingAdmissionPolicy)
+ if err != nil {
+ return nil, err
+ }
+ name := validatingAdmissionPolicy.Name
+ if name == nil {
+ return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
+ }
+ result = &v1beta1.ValidatingAdmissionPolicy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("validatingadmissionpolicies").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// ApplyStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+func (c *validatingAdmissionPolicies) ApplyStatus(ctx context.Context, validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicy, err error) {
+ if validatingAdmissionPolicy == nil {
+ return nil, fmt.Errorf("validatingAdmissionPolicy provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(validatingAdmissionPolicy)
+ if err != nil {
+ return nil, err
+ }
+
+ name := validatingAdmissionPolicy.Name
+ if name == nil {
+ return nil, fmt.Errorf("validatingAdmissionPolicy.Name must be provided to Apply")
+ }
+
+ result = &v1beta1.ValidatingAdmissionPolicy{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("validatingadmissionpolicies").
+ Name(*name).
+ SubResource("status").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
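The new cluster-scoped typed client is reachable directly from the clientset. A minimal sketch of listing policies with it; the context and clientset are assumed to come from the surrounding test setup:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printPolicyNames lists ValidatingAdmissionPolicies through the v1beta1 typed
// client; the resource is cluster-scoped, so there is no namespace argument.
func printPolicyNames(ctx context.Context, cs kubernetes.Interface) error {
	list, err := cs.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range list.Items {
		fmt.Println(list.Items[i].Name)
	}
	return nil
}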
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
new file mode 100644
index 0000000000..bba37bb047
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
@@ -0,0 +1,197 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ json "encoding/json"
+ "fmt"
+ "time"
+
+ v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ admissionregistrationv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+// ValidatingAdmissionPolicyBindingsGetter has a method to return a ValidatingAdmissionPolicyBindingInterface.
+// A group's client should implement this interface.
+type ValidatingAdmissionPolicyBindingsGetter interface {
+ ValidatingAdmissionPolicyBindings() ValidatingAdmissionPolicyBindingInterface
+}
+
+// ValidatingAdmissionPolicyBindingInterface has methods to work with ValidatingAdmissionPolicyBinding resources.
+type ValidatingAdmissionPolicyBindingInterface interface {
+ Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
+ Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingAdmissionPolicyBindingList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error)
+ Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error)
+ ValidatingAdmissionPolicyBindingExpansion
+}
+
+// validatingAdmissionPolicyBindings implements ValidatingAdmissionPolicyBindingInterface
+type validatingAdmissionPolicyBindings struct {
+ client rest.Interface
+}
+
+// newValidatingAdmissionPolicyBindings returns a ValidatingAdmissionPolicyBindings
+func newValidatingAdmissionPolicyBindings(c *AdmissionregistrationV1beta1Client) *validatingAdmissionPolicyBindings {
+ return &validatingAdmissionPolicyBindings{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the validatingAdmissionPolicyBinding, and returns the corresponding validatingAdmissionPolicyBinding object, and an error if there is any.
+func (c *validatingAdmissionPolicyBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicyBinding{}
+ err = c.client.Get().
+ Resource("validatingadmissionpolicybindings").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ValidatingAdmissionPolicyBindings that match those selectors.
+func (c *validatingAdmissionPolicyBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingAdmissionPolicyBindingList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.ValidatingAdmissionPolicyBindingList{}
+ err = c.client.Get().
+ Resource("validatingadmissionpolicybindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested validatingAdmissionPolicyBindings.
+func (c *validatingAdmissionPolicyBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("validatingadmissionpolicybindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a validatingAdmissionPolicyBinding and creates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
+func (c *validatingAdmissionPolicyBindings) Create(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.CreateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicyBinding{}
+ err = c.client.Post().
+ Resource("validatingadmissionpolicybindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(validatingAdmissionPolicyBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a validatingAdmissionPolicyBinding and updates it. Returns the server's representation of the validatingAdmissionPolicyBinding, and an error, if there is any.
+func (c *validatingAdmissionPolicyBindings) Update(ctx context.Context, validatingAdmissionPolicyBinding *v1beta1.ValidatingAdmissionPolicyBinding, opts v1.UpdateOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicyBinding{}
+ err = c.client.Put().
+ Resource("validatingadmissionpolicybindings").
+ Name(validatingAdmissionPolicyBinding.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(validatingAdmissionPolicyBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the validatingAdmissionPolicyBinding and deletes it. Returns an error if one occurs.
+func (c *validatingAdmissionPolicyBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("validatingadmissionpolicybindings").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *validatingAdmissionPolicyBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("validatingadmissionpolicybindings").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched validatingAdmissionPolicyBinding.
+func (c *validatingAdmissionPolicyBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
+ result = &v1beta1.ValidatingAdmissionPolicyBinding{}
+ err = c.client.Patch(pt).
+ Resource("validatingadmissionpolicybindings").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Apply takes the given apply declarative configuration, applies it and returns the applied validatingAdmissionPolicyBinding.
+func (c *validatingAdmissionPolicyBindings) Apply(ctx context.Context, validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBindingApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ValidatingAdmissionPolicyBinding, err error) {
+ if validatingAdmissionPolicyBinding == nil {
+ return nil, fmt.Errorf("validatingAdmissionPolicyBinding provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(validatingAdmissionPolicyBinding)
+ if err != nil {
+ return nil, err
+ }
+ name := validatingAdmissionPolicyBinding.Name
+ if name == nil {
+ return nil, fmt.Errorf("validatingAdmissionPolicyBinding.Name must be provided to Apply")
+ }
+ result = &v1beta1.ValidatingAdmissionPolicyBinding{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Resource("validatingadmissionpolicybindings").
+ Name(*name).
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
index aea9d0e133..81be8b2e04 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
@@ -28,6 +28,7 @@ import (
type AuthenticationV1Interface interface {
RESTClient() rest.Interface
+ SelfSubjectReviewsGetter
TokenReviewsGetter
}
@@ -36,6 +37,10 @@ type AuthenticationV1Client struct {
restClient rest.Interface
}
+func (c *AuthenticationV1Client) SelfSubjectReviews() SelfSubjectReviewInterface {
+ return newSelfSubjectReviews(c)
+}
+
func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface {
return newTokenReviews(c)
}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
index 0413fb2b66..35f2c22b4f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
@@ -18,4 +18,6 @@ limitations under the License.
package v1
+type SelfSubjectReviewExpansion interface{}
+
type TokenReviewExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
new file mode 100644
index 0000000000..bfb9603d67
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/selfsubjectreview.go
@@ -0,0 +1,64 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+
+ v1 "k8s.io/api/authentication/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+// SelfSubjectReviewsGetter has a method to return a SelfSubjectReviewInterface.
+// A group's client should implement this interface.
+type SelfSubjectReviewsGetter interface {
+ SelfSubjectReviews() SelfSubjectReviewInterface
+}
+
+// SelfSubjectReviewInterface has methods to work with SelfSubjectReview resources.
+type SelfSubjectReviewInterface interface {
+ Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (*v1.SelfSubjectReview, error)
+ SelfSubjectReviewExpansion
+}
+
+// selfSubjectReviews implements SelfSubjectReviewInterface
+type selfSubjectReviews struct {
+ client rest.Interface
+}
+
+// newSelfSubjectReviews returns a SelfSubjectReviews
+func newSelfSubjectReviews(c *AuthenticationV1Client) *selfSubjectReviews {
+ return &selfSubjectReviews{
+ client: c.RESTClient(),
+ }
+}
+
+// Create takes the representation of a selfSubjectReview and creates it. Returns the server's representation of the selfSubjectReview, and an error, if there is any.
+func (c *selfSubjectReviews) Create(ctx context.Context, selfSubjectReview *v1.SelfSubjectReview, opts metav1.CreateOptions) (result *v1.SelfSubjectReview, err error) {
+ result = &v1.SelfSubjectReview{}
+ err = c.client.Post().
+ Resource("selfsubjectreviews").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(selfSubjectReview).
+ Do(ctx).
+ Into(result)
+ return
+}
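The SelfSubjectReview client exposes only Create; posting an empty object returns the identity the API server resolved for the caller's credentials, the same call kubectl auth whoami makes. A brief sketch, with the clientset assumed to come from test setup:

package example

import (
	"context"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// whoAmI asks the API server who the current credentials authenticate as and
// returns the resolved username.
func whoAmI(ctx context.Context, cs kubernetes.Interface) (string, error) {
	review, err := cs.AuthenticationV1().SelfSubjectReviews().
		Create(ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		return "", err
	}
	return review.Status.UserInfo.Username, nil
}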
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
index f24099b90d..978b26db03 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
@@ -43,7 +43,6 @@ type NetworkPoliciesGetter interface {
type NetworkPolicyInterface interface {
Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error)
Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error)
- UpdateStatus(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error)
@@ -51,7 +50,6 @@ type NetworkPolicyInterface interface {
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error)
Apply(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error)
- ApplyStatus(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error)
NetworkPolicyExpansion
}
@@ -141,22 +139,6 @@ func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.Net
return
}
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *networkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) {
- result = &v1beta1.NetworkPolicy{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("networkpolicies").
- Name(networkPolicy.Name).
- SubResource("status").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(networkPolicy).
- Do(ctx).
- Into(result)
- return
-}
-
// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
@@ -224,33 +206,3 @@ func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *extensionsv1
Into(result)
return
}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *networkPolicies) ApplyStatus(ctx context.Context, networkPolicy *extensionsv1beta1.NetworkPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.NetworkPolicy, err error) {
- if networkPolicy == nil {
- return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil")
- }
- patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(networkPolicy)
- if err != nil {
- return nil, err
- }
-
- name := networkPolicy.Name
- if name == nil {
- return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply")
- }
-
- result = &v1beta1.NetworkPolicy{}
- err = c.client.Patch(types.ApplyPatchType).
- Namespace(c.ns).
- Resource("networkpolicies").
- Name(*name).
- SubResource("status").
- VersionedParams(&patchOpts, scheme.ParameterCodec).
- Body(data).
- Do(ctx).
- Into(result)
- return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
index 97afd62786..d7454ce145 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
@@ -43,7 +43,6 @@ type NetworkPoliciesGetter interface {
type NetworkPolicyInterface interface {
Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error)
Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error)
- UpdateStatus(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error)
@@ -51,7 +50,6 @@ type NetworkPolicyInterface interface {
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error)
Apply(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error)
- ApplyStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error)
NetworkPolicyExpansion
}
@@ -141,22 +139,6 @@ func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkP
return
}
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *networkPolicies) UpdateStatus(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) {
- result = &v1.NetworkPolicy{}
- err = c.client.Put().
- Namespace(c.ns).
- Resource("networkpolicies").
- Name(networkPolicy.Name).
- SubResource("status").
- VersionedParams(&opts, scheme.ParameterCodec).
- Body(networkPolicy).
- Do(ctx).
- Into(result)
- return
-}
-
// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
@@ -224,33 +206,3 @@ func (c *networkPolicies) Apply(ctx context.Context, networkPolicy *networkingv1
Into(result)
return
}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *networkPolicies) ApplyStatus(ctx context.Context, networkPolicy *networkingv1.NetworkPolicyApplyConfiguration, opts metav1.ApplyOptions) (result *v1.NetworkPolicy, err error) {
- if networkPolicy == nil {
- return nil, fmt.Errorf("networkPolicy provided to Apply must not be nil")
- }
- patchOpts := opts.ToPatchOptions()
- data, err := json.Marshal(networkPolicy)
- if err != nil {
- return nil, err
- }
-
- name := networkPolicy.Name
- if name == nil {
- return nil, fmt.Errorf("networkPolicy.Name must be provided to Apply")
- }
-
- result = &v1.NetworkPolicy{}
- err = c.client.Patch(types.ApplyPatchType).
- Namespace(c.ns).
- Resource("networkpolicies").
- Name(*name).
- SubResource("status").
- VersionedParams(&patchOpts, scheme.ParameterCodec).
- Body(data).
- Do(ctx).
- Into(result)
- return
-}
diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go
index 8960abc4f4..7148781f42 100644
--- a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go
+++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go
@@ -22,6 +22,14 @@ package v1beta1
// MutatingWebhookConfigurationLister.
type MutatingWebhookConfigurationListerExpansion interface{}
+// ValidatingAdmissionPolicyListerExpansion allows custom methods to be added to
+// ValidatingAdmissionPolicyLister.
+type ValidatingAdmissionPolicyListerExpansion interface{}
+
+// ValidatingAdmissionPolicyBindingListerExpansion allows custom methods to be added to
+// ValidatingAdmissionPolicyBindingLister.
+type ValidatingAdmissionPolicyBindingListerExpansion interface{}
+
// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to
// ValidatingWebhookConfigurationLister.
type ValidatingWebhookConfigurationListerExpansion interface{}
diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go
new file mode 100644
index 0000000000..7018b3ceec
--- /dev/null
+++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -0,0 +1,68 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ValidatingAdmissionPolicyLister helps list ValidatingAdmissionPolicies.
+// All objects returned here must be treated as read-only.
+type ValidatingAdmissionPolicyLister interface {
+ // List lists all ValidatingAdmissionPolicies in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error)
+ // Get retrieves the ValidatingAdmissionPolicy from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error)
+ ValidatingAdmissionPolicyListerExpansion
+}
+
+// validatingAdmissionPolicyLister implements the ValidatingAdmissionPolicyLister interface.
+type validatingAdmissionPolicyLister struct {
+ indexer cache.Indexer
+}
+
+// NewValidatingAdmissionPolicyLister returns a new ValidatingAdmissionPolicyLister.
+func NewValidatingAdmissionPolicyLister(indexer cache.Indexer) ValidatingAdmissionPolicyLister {
+ return &validatingAdmissionPolicyLister{indexer: indexer}
+}
+
+// List lists all ValidatingAdmissionPolicies in the indexer.
+func (s *validatingAdmissionPolicyLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicy, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicy))
+ })
+ return ret, err
+}
+
+// Get retrieves the ValidatingAdmissionPolicy from the index for a given name.
+func (s *validatingAdmissionPolicyLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicy, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicy"), name)
+ }
+ return obj.(*v1beta1.ValidatingAdmissionPolicy), nil
+}
diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
new file mode 100644
index 0000000000..5fcebfd22f
--- /dev/null
+++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
@@ -0,0 +1,68 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "k8s.io/api/admissionregistration/v1beta1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+)
+
+// ValidatingAdmissionPolicyBindingLister helps list ValidatingAdmissionPolicyBindings.
+// All objects returned here must be treated as read-only.
+type ValidatingAdmissionPolicyBindingLister interface {
+ // List lists all ValidatingAdmissionPolicyBindings in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error)
+ // Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error)
+ ValidatingAdmissionPolicyBindingListerExpansion
+}
+
+// validatingAdmissionPolicyBindingLister implements the ValidatingAdmissionPolicyBindingLister interface.
+type validatingAdmissionPolicyBindingLister struct {
+ indexer cache.Indexer
+}
+
+// NewValidatingAdmissionPolicyBindingLister returns a new ValidatingAdmissionPolicyBindingLister.
+func NewValidatingAdmissionPolicyBindingLister(indexer cache.Indexer) ValidatingAdmissionPolicyBindingLister {
+ return &validatingAdmissionPolicyBindingLister{indexer: indexer}
+}
+
+// List lists all ValidatingAdmissionPolicyBindings in the indexer.
+func (s *validatingAdmissionPolicyBindingLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingAdmissionPolicyBinding, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.ValidatingAdmissionPolicyBinding))
+ })
+ return ret, err
+}
+
+// Get retrieves the ValidatingAdmissionPolicyBinding from the index for a given name.
+func (s *validatingAdmissionPolicyBindingLister) Get(name string) (*v1beta1.ValidatingAdmissionPolicyBinding, error) {
+ obj, exists, err := s.indexer.GetByKey(name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("validatingadmissionpolicybinding"), name)
+ }
+ return obj.(*v1beta1.ValidatingAdmissionPolicyBinding), nil
+}
diff --git a/vendor/k8s.io/client-go/openapi/typeconverter.go b/vendor/k8s.io/client-go/openapi/typeconverter.go
new file mode 100644
index 0000000000..4b91e66d45
--- /dev/null
+++ b/vendor/k8s.io/client-go/openapi/typeconverter.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openapi
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/util/managedfields"
+ "k8s.io/kube-openapi/pkg/spec3"
+ "k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+func NewTypeConverter(client Client, preserveUnknownFields bool) (managedfields.TypeConverter, error) {
+ spec := map[string]*spec.Schema{}
+ paths, err := client.Paths()
+ if err != nil {
+ return nil, fmt.Errorf("failed to list paths: %w", err)
+ }
+ for _, gv := range paths {
+ s, err := gv.Schema("application/json")
+ if err != nil {
+ return nil, fmt.Errorf("failed to download schema: %w", err)
+ }
+ var openapi spec3.OpenAPI
+ if err := json.Unmarshal(s, &openapi); err != nil {
+ return nil, fmt.Errorf("failed to parse schema: %w", err)
+ }
+ for k, v := range openapi.Components.Schemas {
+ spec[k] = v
+ }
+ }
+ return managedfields.NewTypeConverter(spec, preserveUnknownFields)
+}
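
The NewTypeConverter helper added above builds a managedfields.TypeConverter from the cluster's OpenAPI v3 documents. A minimal sketch of wiring it up, assuming the discovery client's OpenAPIV3() accessor (present in recent client-go releases) as the source of the openapi.Client:

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/openapi"
	"k8s.io/client-go/tools/clientcmd"
)

func buildTypeConverter(kubeconfigPath string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		return err
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// OpenAPIV3() exposes the per-group-version schema client consumed by NewTypeConverter.
	var client openapi.Client = cs.Discovery().OpenAPIV3()
	tc, err := openapi.NewTypeConverter(client, false /* preserveUnknownFields */)
	if err != nil {
		return err
	}
	fmt.Printf("type converter ready: %T\n", tc)
	return nil
}
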
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
index 5331b237a7..b471f5cc64 100644
--- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
@@ -32,12 +32,12 @@ import (
"sync"
"time"
- "github.com/davecgh/go-spew/spew"
"golang.org/x/term"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/util/dump"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/pkg/apis/clientauthentication"
"k8s.io/client-go/pkg/apis/clientauthentication/install"
@@ -81,8 +81,6 @@ func newCache() *cache {
return &cache{m: make(map[string]*Authenticator)}
}
-var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "}
-
func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string {
key := struct {
conf *api.ExecConfig
@@ -91,7 +89,7 @@ func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) strin
conf: conf,
cluster: cluster,
}
- return spewConfig.Sprint(key)
+ return dump.Pretty(key)
}
type cache struct {
diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go
index 81e3cbd689..f8ff7e928c 100644
--- a/vendor/k8s.io/client-go/rest/config.go
+++ b/vendor/k8s.io/client-go/rest/config.go
@@ -316,7 +316,7 @@ func RESTClientFor(config *Config) (*RESTClient, error) {
// Validate config.Host before constructing the transport/client so we can fail fast.
// ServerURL will be obtained later in RESTClientForConfigAndClient()
- _, _, err := defaultServerUrlFor(config)
+ _, _, err := DefaultServerUrlFor(config)
if err != nil {
return nil, err
}
@@ -343,7 +343,7 @@ func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RES
return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
}
- baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+ baseURL, versionedAPIPath, err := DefaultServerUrlFor(config)
if err != nil {
return nil, err
}
@@ -390,7 +390,7 @@ func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
// Validate config.Host before constructing the transport/client so we can fail fast.
// ServerURL will be obtained later in UnversionedRESTClientForConfigAndClient()
- _, _, err := defaultServerUrlFor(config)
+ _, _, err := DefaultServerUrlFor(config)
if err != nil {
return nil, err
}
@@ -410,7 +410,7 @@ func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Cl
return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
}
- baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+ baseURL, versionedAPIPath, err := DefaultServerUrlFor(config)
if err != nil {
return nil, err
}
@@ -548,7 +548,7 @@ func InClusterConfig() (*Config, error) {
// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
// still possible.
func IsConfigTransportTLS(config Config) bool {
- baseURL, _, err := defaultServerUrlFor(&config)
+ baseURL, _, err := DefaultServerUrlFor(&config)
if err != nil {
return false
}
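
With defaultServerUrlFor now exported as DefaultServerUrlFor, callers can run the same fail-fast host validation themselves; a small sketch:

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

// validateHost is a sketch: it rejects a malformed config.Host before any
// transport or RESTClient is constructed, mirroring what RESTClientFor does.
func validateHost(cfg *rest.Config) error {
	baseURL, versionedAPIPath, err := rest.DefaultServerUrlFor(cfg)
	if err != nil {
		return fmt.Errorf("invalid host %q: %w", cfg.Host, err)
	}
	fmt.Printf("server URL %s, versioned API path %s\n", baseURL, versionedAPIPath)
	return nil
}
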
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
index bb6fb4decb..850e57daeb 100644
--- a/vendor/k8s.io/client-go/rest/request.go
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -24,6 +24,7 @@ import (
"io"
"mime"
"net/http"
+ "net/http/httptrace"
"net/url"
"os"
"path"
@@ -925,15 +926,38 @@ func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) {
}
url := r.URL().String()
- req, err := http.NewRequest(r.verb, url, body)
+ req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, newDNSMetricsTrace(ctx)), r.verb, url, body)
if err != nil {
return nil, err
}
- req = req.WithContext(ctx)
req.Header = r.headers
return req, nil
}
+// newDNSMetricsTrace returns an HTTP trace that tracks time spent on DNS lookups per host.
+// This metric is available in client as "rest_client_dns_resolution_duration_seconds".
+func newDNSMetricsTrace(ctx context.Context) *httptrace.ClientTrace {
+ type dnsMetric struct {
+ start time.Time
+ host string
+ sync.Mutex
+ }
+ dns := &dnsMetric{}
+ return &httptrace.ClientTrace{
+ DNSStart: func(info httptrace.DNSStartInfo) {
+ dns.Lock()
+ defer dns.Unlock()
+ dns.start = time.Now()
+ dns.host = info.Host
+ },
+ DNSDone: func(info httptrace.DNSDoneInfo) {
+ dns.Lock()
+ defer dns.Unlock()
+ metrics.ResolverLatency.Observe(ctx, dns.host, time.Since(dns.start))
+ },
+ }
+}
+
// request connects to the server and invokes the provided function when a server response is
// received. It handles retry behavior and up front validation of requests. It will invoke
// fn at most once. It will return an error if a problem occurred prior to connecting to the
diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go
index a56d1838d8..c4ce6e3b8f 100644
--- a/vendor/k8s.io/client-go/rest/url_utils.go
+++ b/vendor/k8s.io/client-go/rest/url_utils.go
@@ -77,9 +77,9 @@ func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) s
return versionedAPIPath
}
-// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// DefaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
// requires Host and Version to be set prior to being called.
-func defaultServerUrlFor(config *Config) (*url.URL, string, error) {
+func DefaultServerUrlFor(config *Config) (*url.URL, string, error) {
// TODO: move the default to secure when the apiserver supports TLS by default
// config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS
index 726205b3df..921ac2fa02 100644
--- a/vendor/k8s.io/client-go/tools/cache/OWNERS
+++ b/vendor/k8s.io/client-go/tools/cache/OWNERS
@@ -2,7 +2,6 @@
approvers:
- thockin
- - lavalamp
- smarterclayton
- wojtek-t
- deads2k
@@ -11,7 +10,6 @@ approvers:
- ncdc
reviewers:
- thockin
- - lavalamp
- smarterclayton
- wojtek-t
- deads2k
@@ -26,3 +24,5 @@ reviewers:
- dims
- ingvagabund
- ncdc
+emeritus_approvers:
+ - lavalamp
diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go
index f437f28616..8a1104bde8 100644
--- a/vendor/k8s.io/client-go/tools/cache/controller.go
+++ b/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -18,7 +18,6 @@ package cache
import (
"errors"
- "os"
"sync"
"time"
@@ -148,9 +147,6 @@ func (c *controller) Run(stopCh <-chan struct{}) {
if c.config.WatchErrorHandler != nil {
r.watchErrorHandler = c.config.WatchErrorHandler
}
- if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 {
- r.UseWatchList = true
- }
c.reflectorMutex.Lock()
c.reflector = r
diff --git a/vendor/k8s.io/client-go/tools/cache/object-names.go b/vendor/k8s.io/client-go/tools/cache/object-names.go
new file mode 100644
index 0000000000..aa8dbb1993
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/object-names.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// ObjectName is a reference to an object of some implicit kind
+type ObjectName struct {
+ Namespace string
+ Name string
+}
+
+// NewObjectName constructs a new one
+func NewObjectName(namespace, name string) ObjectName {
+ return ObjectName{Namespace: namespace, Name: name}
+}
+
+// Parts is the inverse of the constructor
+func (objName ObjectName) Parts() (namespace, name string) {
+ return objName.Namespace, objName.Name
+}
+
+// String returns the standard string encoding,
+// which is designed to match the historical behavior of MetaNamespaceKeyFunc.
+// Note this behavior is different from the String method of types.NamespacedName.
+func (objName ObjectName) String() string {
+ if len(objName.Namespace) > 0 {
+ return objName.Namespace + "/" + objName.Name
+ }
+ return objName.Name
+}
+
+// ParseObjectName tries to parse the standard encoding
+func ParseObjectName(str string) (ObjectName, error) {
+ var objName ObjectName
+ var err error
+ objName.Namespace, objName.Name, err = SplitMetaNamespaceKey(str)
+ return objName, err
+}
+
+// NamespacedNameAsObjectName rebrands the given NamespacedName as an ObjectName
+func NamespacedNameAsObjectName(nn types.NamespacedName) ObjectName {
+ return NewObjectName(nn.Namespace, nn.Name)
+}
+
+// AsNamespacedName rebrands as a NamespacedName
+func (objName ObjectName) AsNamespacedName() types.NamespacedName {
+ return types.NamespacedName{Namespace: objName.Namespace, Name: objName.Name}
+}
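
A short sketch of the new ObjectName helpers: the string form matches the key format of MetaNamespaceKeyFunc, so structured and string keys convert back and forth losslessly.

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	objName := cache.NewObjectName("kube-system", "coredns")
	key := objName.String() // "kube-system/coredns", same encoding as MetaNamespaceKeyFunc

	parsed, err := cache.ParseObjectName(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == objName) // true

	// Round-trip through types.NamespacedName is also lossless.
	nn := objName.AsNamespacedName()
	fmt.Println(cache.NamespacedNameAsObjectName(nn) == objName) // true
}
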
diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go
index 2b335c104c..45eaff5285 100644
--- a/vendor/k8s.io/client-go/tools/cache/reflector.go
+++ b/vendor/k8s.io/client-go/tools/cache/reflector.go
@@ -22,6 +22,7 @@ import (
"fmt"
"io"
"math/rand"
+ "os"
"reflect"
"strings"
"sync"
@@ -69,9 +70,7 @@ type Reflector struct {
listerWatcher ListerWatcher
// backoff manages backoff of ListWatch
backoffManager wait.BackoffManager
- // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch.
- initConnBackoffManager wait.BackoffManager
- resyncPeriod time.Duration
+ resyncPeriod time.Duration
// clock allows tests to manipulate time
clock clock.Clock
// paginatedResult defines whether pagination should be forced for list calls.
@@ -220,11 +219,10 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S
// We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when
// API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
// 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
- backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
- initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
- clock: reflectorClock,
- watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler),
- expectedType: reflect.TypeOf(expectedType),
+ backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, reflectorClock),
+ clock: reflectorClock,
+ watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler),
+ expectedType: reflect.TypeOf(expectedType),
}
if r.name == "" {
@@ -239,6 +237,10 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store S
r.expectedGVK = getExpectedGVKFromObject(expectedType)
}
+ if s := os.Getenv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA"); len(s) > 0 {
+ r.UseWatchList = true
+ }
+
return r
}
@@ -420,7 +422,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc
select {
case <-stopCh:
return nil
- case <-r.initConnBackoffManager.Backoff().C():
+ case <-r.backoffManager.Backoff().C():
continue
}
}
@@ -446,7 +448,7 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc
select {
case <-stopCh:
return nil
- case <-r.initConnBackoffManager.Backoff().C():
+ case <-r.backoffManager.Backoff().C():
continue
}
case apierrors.IsInternalError(err) && retry.ShouldRetry():
@@ -508,7 +510,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
pager.PageSize = 0
}
- list, paginatedResult, err = pager.List(context.Background(), options)
+ list, paginatedResult, err = pager.ListWithAlloc(context.Background(), options)
if isExpiredError(err) || isTooLargeResourceVersionError(err) {
r.setIsLastSyncResourceVersionUnavailable(true)
// Retry immediately if the resource version used to list is unavailable.
@@ -517,7 +519,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
// resource version it is listing at is expired or the cache may not yet be synced to the provided
// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
// the reflector makes forward progress.
- list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
+ list, paginatedResult, err = pager.ListWithAlloc(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
}
close(listCh)
}()
@@ -555,7 +557,7 @@ func (r *Reflector) list(stopCh <-chan struct{}) error {
}
resourceVersion = listMetaInterface.GetResourceVersion()
initTrace.Step("Resource version extracted")
- items, err := meta.ExtractList(list)
+ items, err := meta.ExtractListWithAlloc(list)
if err != nil {
return fmt.Errorf("unable to understand list result %#v (%v)", list, err)
}
@@ -599,7 +601,7 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) {
isErrorRetriableWithSideEffectsFn := func(err error) bool {
if canRetry := isWatchErrorRetriable(err); canRetry {
klog.V(2).Infof("%s: watch-list of %v returned %v - backing off", r.name, r.typeDescription, err)
- <-r.initConnBackoffManager.Backoff().C()
+ <-r.backoffManager.Backoff().C()
return true
}
if isExpiredError(err) || isTooLargeResourceVersionError(err) {
diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
index a889fdbc36..be8694ddb6 100644
--- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go
+++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -459,29 +459,30 @@ func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed")
return
}
- fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
- KnownObjects: s.indexer,
- EmitDeltaTypeReplaced: true,
- Transformer: s.transform,
- })
-
- cfg := &Config{
- Queue: fifo,
- ListerWatcher: s.listerWatcher,
- ObjectType: s.objectType,
- ObjectDescription: s.objectDescription,
- FullResyncPeriod: s.resyncCheckPeriod,
- RetryOnError: false,
- ShouldResync: s.processor.shouldResync,
-
- Process: s.HandleDeltas,
- WatchErrorHandler: s.watchErrorHandler,
- }
func() {
s.startedLock.Lock()
defer s.startedLock.Unlock()
+ fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+ KnownObjects: s.indexer,
+ EmitDeltaTypeReplaced: true,
+ Transformer: s.transform,
+ })
+
+ cfg := &Config{
+ Queue: fifo,
+ ListerWatcher: s.listerWatcher,
+ ObjectType: s.objectType,
+ ObjectDescription: s.objectDescription,
+ FullResyncPeriod: s.resyncCheckPeriod,
+ RetryOnError: false,
+ ShouldResync: s.processor.shouldResync,
+
+ Process: s.HandleDeltas,
+ WatchErrorHandler: s.watchErrorHandler,
+ }
+
s.controller = New(cfg)
s.controller.(*controller).clock = s.clock
s.started = true
diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go
index 5308ea7480..5cc3f42ec1 100644
--- a/vendor/k8s.io/client-go/tools/cache/store.go
+++ b/vendor/k8s.io/client-go/tools/cache/store.go
@@ -21,6 +21,7 @@ import (
"strings"
"k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Store is a generic object storage and processing interface. A
@@ -99,20 +100,38 @@ type ExplicitKey string
// The key uses the format / unless is empty, then
// it's just .
//
-// TODO: replace key-as-string with a key-as-struct so that this
-// packing/unpacking won't be necessary.
+// Clients that want a structured alternative can use ObjectToName or MetaObjectToName.
+// Note: this would not be a client that wants a key for a Store because those are
+// necessarily strings.
+//
+// TODO maybe some day?: change Store to be keyed differently
func MetaNamespaceKeyFunc(obj interface{}) (string, error) {
if key, ok := obj.(ExplicitKey); ok {
return string(key), nil
}
+ objName, err := ObjectToName(obj)
+ if err != nil {
+ return "", err
+ }
+ return objName.String(), nil
+}
+
+// ObjectToName returns the structured name for the given object,
+// if indeed it can be viewed as a metav1.Object.
+func ObjectToName(obj interface{}) (ObjectName, error) {
meta, err := meta.Accessor(obj)
if err != nil {
- return "", fmt.Errorf("object has no meta: %v", err)
+ return ObjectName{}, fmt.Errorf("object has no meta: %v", err)
}
- if len(meta.GetNamespace()) > 0 {
- return meta.GetNamespace() + "/" + meta.GetName(), nil
+ return MetaObjectToName(meta), nil
+}
+
+// MetaObjectToName returns the structured name for the given object
+func MetaObjectToName(obj metav1.Object) ObjectName {
+ if len(obj.GetNamespace()) > 0 {
+ return ObjectName{Namespace: obj.GetNamespace(), Name: obj.GetName()}
}
- return meta.GetName(), nil
+ return ObjectName{Namespace: "", Name: obj.GetName()}
}
// SplitMetaNamespaceKey returns the namespace and name that
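
MetaNamespaceKeyFunc now delegates to the structured helpers above; a brief sketch showing that ObjectToName/MetaObjectToName and the classic string key agree, using a pod as the example object:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}

	key, err := cache.MetaNamespaceKeyFunc(pod) // "default/web-0"
	if err != nil {
		panic(err)
	}

	objName, err := cache.ObjectToName(pod) // ObjectName{Namespace: "default", Name: "web-0"}
	if err != nil {
		panic(err)
	}
	fmt.Println(key == objName.String()) // true

	// MetaObjectToName skips the meta.Accessor step when a metav1.Object is already in hand.
	fmt.Println(cache.MetaObjectToName(&pod.ObjectMeta) == objName) // true
}
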
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
index 71fb821b1e..ae8b8c7038 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -67,7 +67,7 @@ type Preferences struct {
type Cluster struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
// +k8s:conversion-gen=false
- LocationOfOrigin string
+ LocationOfOrigin string `json:"-"`
// Server is the address of the kubernetes cluster (https://hostname:port).
Server string `json:"server"`
// TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used.
@@ -107,7 +107,7 @@ type Cluster struct {
type AuthInfo struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
// +k8s:conversion-gen=false
- LocationOfOrigin string
+ LocationOfOrigin string `json:"-"`
// ClientCertificate is the path to a client cert file for TLS.
// +optional
ClientCertificate string `json:"client-certificate,omitempty"`
@@ -159,7 +159,7 @@ type AuthInfo struct {
type Context struct {
// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
// +k8s:conversion-gen=false
- LocationOfOrigin string
+ LocationOfOrigin string `json:"-"`
// Cluster is the name of the cluster for this context
Cluster string `json:"cluster"`
// AuthInfo is the name of the authInfo for this context
@@ -252,7 +252,7 @@ type ExecConfig struct {
// recommended as one of the prime benefits of exec plugins is that no secrets need
// to be stored directly in the kubeconfig.
// +k8s:conversion-gen=false
- Config runtime.Object
+ Config runtime.Object `json:"-"`
// InteractiveMode determines this plugin's relationship with standard input. Valid
// values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this
@@ -264,7 +264,7 @@ type ExecConfig struct {
// client.authentication.k8s.io/v1beta1, then this field is optional and defaults
// to "IfAvailable" when unset. Otherwise, this field is required.
// +optional
- InteractiveMode ExecInteractiveMode
+ InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"`
// StdinUnavailable indicates whether the exec authenticator can pass standard
// input through to this exec plugin. For example, a higher level entity might be using
@@ -272,14 +272,14 @@ type ExecConfig struct {
// plugin to use standard input. This is kept here in order to keep all of the exec configuration
// together, but it is never serialized.
// +k8s:conversion-gen=false
- StdinUnavailable bool
+ StdinUnavailable bool `json:"-"`
// StdinUnavailableMessage is an optional message to be displayed when the exec authenticator
// cannot successfully run this exec plugin because it needs to use standard input and
// StdinUnavailable is true. For example, a process that is already using standard input to
// read user instructions might set this to "used by my-program to read user instructions".
// +k8s:conversion-gen=false
- StdinUnavailableMessage string
+ StdinUnavailableMessage string `json:"-"`
}
var _ fmt.Stringer = new(ExecConfig)
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go
index 44de1d41d8..b75737f1c9 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/loader.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go
@@ -128,6 +128,28 @@ type ClientConfigLoadingRules struct {
// WarnIfAllMissing indicates whether the configuration files pointed by KUBECONFIG environment variable are present or not.
// In case of missing files, it warns the user about the missing files.
WarnIfAllMissing bool
+
+ // Warner is the warning log callback to use in case of missing files.
+ Warner WarningHandler
+}
+
+// WarningHandler allows setting the logging function to use
+type WarningHandler func(error)
+
+func (handler WarningHandler) Warn(err error) {
+ if handler == nil {
+ klog.V(1).Info(err)
+ } else {
+ handler(err)
+ }
+}
+
+type MissingConfigError struct {
+ Missing []string
+}
+
+func (c MissingConfigError) Error() string {
+ return fmt.Sprintf("Config not found: %s", strings.Join(c.Missing, ", "))
}
// ClientConfigLoadingRules implements the ClientConfigLoader interface.
@@ -219,7 +241,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
}
if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 {
- klog.Warningf("Config not found: %s", strings.Join(missingList, ", "))
+ rules.Warner.Warn(MissingConfigError{Missing: missingList})
}
// first merge all of our maps
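
The new Warner hook replaces the hard-coded klog warning for missing kubeconfig files; a minimal sketch routing the MissingConfigError to an application logger:

package main

import (
	"log"

	"k8s.io/client-go/tools/clientcmd"
)

func loadKubeconfig() error {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	rules.WarnIfAllMissing = true
	// Without a Warner, missing files are still logged through klog at V(1).
	rules.Warner = func(err error) {
		if missing, ok := err.(clientcmd.MissingConfigError); ok {
			log.Printf("no kubeconfig found, continuing with defaults: %v", missing.Missing)
			return
		}
		log.Print(err)
	}
	_, err := rules.Load()
	return err
}
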
diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go
index f36430dc3e..99d3d8e239 100644
--- a/vendor/k8s.io/client-go/tools/metrics/metrics.go
+++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go
@@ -42,6 +42,10 @@ type LatencyMetric interface {
Observe(ctx context.Context, verb string, u url.URL, latency time.Duration)
}
+type ResolverLatencyMetric interface {
+ Observe(ctx context.Context, host string, latency time.Duration)
+}
+
// SizeMetric observes client response size partitioned by verb and host.
type SizeMetric interface {
Observe(ctx context.Context, verb string, host string, size float64)
@@ -64,6 +68,17 @@ type RetryMetric interface {
IncrementRetry(ctx context.Context, code string, method string, host string)
}
+// TransportCacheMetric shows the number of entries in the internal transport cache
+type TransportCacheMetric interface {
+ Observe(value int)
+}
+
+// TransportCreateCallsMetric counts the number of times a transport is created
+// partitioned by the result of the cache: hit, miss, uncacheable
+type TransportCreateCallsMetric interface {
+ Increment(result string)
+}
+
var (
// ClientCertExpiry is the expiry time of a client certificate
ClientCertExpiry ExpiryMetric = noopExpiry{}
@@ -71,6 +86,8 @@ var (
ClientCertRotationAge DurationMetric = noopDuration{}
// RequestLatency is the latency metric that rest clients will update.
RequestLatency LatencyMetric = noopLatency{}
+ // ResolverLatency is the latency metric that DNS resolver will update
+ ResolverLatency ResolverLatencyMetric = noopResolverLatency{}
// RequestSize is the request size metric that rest clients will update.
RequestSize SizeMetric = noopSize{}
// ResponseSize is the response size metric that rest clients will update.
@@ -85,6 +102,12 @@ var (
// RequestRetry is the retry metric that tracks the number of
// retries sent to the server.
RequestRetry RetryMetric = noopRetry{}
+ // TransportCacheEntries is the metric that tracks the number of entries in the
+ // internal transport cache.
+ TransportCacheEntries TransportCacheMetric = noopTransportCache{}
+ // TransportCreateCalls is the metric that counts the number of times a new transport
+ // is created
+ TransportCreateCalls TransportCreateCallsMetric = noopTransportCreateCalls{}
)
// RegisterOpts contains all the metrics to register. Metrics may be nil.
@@ -92,12 +115,15 @@ type RegisterOpts struct {
ClientCertExpiry ExpiryMetric
ClientCertRotationAge DurationMetric
RequestLatency LatencyMetric
+ ResolverLatency ResolverLatencyMetric
RequestSize SizeMetric
ResponseSize SizeMetric
RateLimiterLatency LatencyMetric
RequestResult ResultMetric
ExecPluginCalls CallsMetric
RequestRetry RetryMetric
+ TransportCacheEntries TransportCacheMetric
+ TransportCreateCalls TransportCreateCallsMetric
}
// Register registers metrics for the rest client to use. This can
@@ -113,6 +139,9 @@ func Register(opts RegisterOpts) {
if opts.RequestLatency != nil {
RequestLatency = opts.RequestLatency
}
+ if opts.ResolverLatency != nil {
+ ResolverLatency = opts.ResolverLatency
+ }
if opts.RequestSize != nil {
RequestSize = opts.RequestSize
}
@@ -131,6 +160,12 @@ func Register(opts RegisterOpts) {
if opts.RequestRetry != nil {
RequestRetry = opts.RequestRetry
}
+ if opts.TransportCacheEntries != nil {
+ TransportCacheEntries = opts.TransportCacheEntries
+ }
+ if opts.TransportCreateCalls != nil {
+ TransportCreateCalls = opts.TransportCreateCalls
+ }
})
}
@@ -146,6 +181,11 @@ type noopLatency struct{}
func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {}
+type noopResolverLatency struct{}
+
+func (n noopResolverLatency) Observe(ctx context.Context, host string, latency time.Duration) {
+}
+
type noopSize struct{}
func (noopSize) Observe(context.Context, string, string, float64) {}
@@ -161,3 +201,11 @@ func (noopCalls) Increment(int, string) {}
type noopRetry struct{}
func (noopRetry) IncrementRetry(context.Context, string, string, string) {}
+
+type noopTransportCache struct{}
+
+func (noopTransportCache) Observe(int) {}
+
+type noopTransportCreateCalls struct{}
+
+func (noopTransportCreateCalls) Increment(string) {}
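
A sketch of registering the new hooks (ResolverLatency, TransportCacheEntries, TransportCreateCalls); the logger and gauge types below are hypothetical stand-ins for real instrumentation such as Prometheus collectors:

package main

import (
	"context"
	"log"
	"sync/atomic"
	"time"

	"k8s.io/client-go/tools/metrics"
)

// dnsLatencyLogger implements ResolverLatencyMetric by simply logging each lookup.
type dnsLatencyLogger struct{}

func (dnsLatencyLogger) Observe(_ context.Context, host string, latency time.Duration) {
	log.Printf("DNS lookup for %s took %s", host, latency)
}

// transportCacheGauge implements TransportCacheMetric with an atomic gauge.
type transportCacheGauge struct{ entries atomic.Int64 }

func (g *transportCacheGauge) Observe(value int) { g.entries.Store(int64(value)) }

// transportCreateCounter implements TransportCreateCallsMetric, counting hit/miss/uncacheable results.
type transportCreateCounter struct{ calls atomic.Int64 }

func (c *transportCreateCounter) Increment(result string) {
	c.calls.Add(1)
	log.Printf("transport create (%s), total %d", result, c.calls.Load())
}

func main() {
	// Register can only be called once per process; unset fields keep their no-op defaults.
	metrics.Register(metrics.RegisterOpts{
		ResolverLatency:       dnsLatencyLogger{},
		TransportCacheEntries: &transportCacheGauge{},
		TransportCreateCalls:  &transportCreateCounter{},
	})
}
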
diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go
index 9ba988f685..3c77cc37fa 100644
--- a/vendor/k8s.io/client-go/tools/pager/pager.go
+++ b/vendor/k8s.io/client-go/tools/pager/pager.go
@@ -73,7 +73,23 @@ func New(fn ListPageFunc) *ListPager {
// List returns a single list object, but attempts to retrieve smaller chunks from the
// server to reduce the impact on the server. If the chunk attempt fails, it will load
// the full list instead. The Limit field on options, if unset, will default to the page size.
+//
+// If items in the returned list are retained for different durations, and you want to avoid
+// retaining the whole slice returned by p.PageFn as long as any item is referenced,
+// use ListWithAlloc instead.
func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
+ return p.list(ctx, options, false)
+}
+
+// ListWithAlloc works like List, but avoids retaining references to the items slice returned by p.PageFn.
+// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn.
+//
+// If the items in the returned list are not retained, or are retained for the same duration, use List instead for memory efficiency.
+func (p *ListPager) ListWithAlloc(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
+ return p.list(ctx, options, true)
+}
+
+func (p *ListPager) list(ctx context.Context, options metav1.ListOptions, allocNew bool) (runtime.Object, bool, error) {
if options.Limit == 0 {
options.Limit = p.PageSize
}
@@ -123,7 +139,11 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
list.ResourceVersion = m.GetResourceVersion()
list.SelfLink = m.GetSelfLink()
}
- if err := meta.EachListItem(obj, func(obj runtime.Object) error {
+ eachListItemFunc := meta.EachListItem
+ if allocNew {
+ eachListItemFunc = meta.EachListItemWithAlloc
+ }
+ if err := eachListItemFunc(obj, func(obj runtime.Object) error {
list.Items = append(list.Items, obj)
return nil
}); err != nil {
@@ -156,12 +176,26 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
//
// Items are retrieved in chunks from the server to reduce the impact on the server with up to
// ListPager.PageBufferSize chunks buffered concurrently in the background.
+//
+// If items passed to fn are retained for different durations, and you want to avoid
+// retaining the whole slice returned by p.PageFn as long as any item is referenced,
+// use EachListItemWithAlloc instead.
func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
return meta.EachListItem(obj, fn)
})
}
+// EachListItemWithAlloc works like EachListItem, but avoids retaining references to the items slice returned by p.PageFn.
+// It does this by making a shallow copy of non-pointer items in the slice returned by p.PageFn.
+//
+// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
+func (p *ListPager) EachListItemWithAlloc(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error {
+ return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error {
+ return meta.EachListItemWithAlloc(obj, fn)
+ })
+}
+
// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on
// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does
// not return an error, any error encountered while retrieving the list from the server is
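
For callers that keep only part of each page alive, the new *WithAlloc variants avoid pinning the whole page slice; a minimal sketch, assuming a standard clientset (the TLS-secret helper below is illustrative only):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/pager"
)

// listTLSSecrets retains only matching items; EachListItemWithAlloc hands out
// shallow copies, so the retained secrets do not keep each page's item slice reachable.
func listTLSSecrets(ctx context.Context, cs kubernetes.Interface, ns string) ([]*corev1.Secret, error) {
	p := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
		return cs.CoreV1().Secrets(ns).List(ctx, opts)
	}))

	var tlsSecrets []*corev1.Secret
	err := p.EachListItemWithAlloc(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
		s, ok := obj.(*corev1.Secret)
		if !ok {
			return fmt.Errorf("unexpected object type %T", obj)
		}
		if s.Type == corev1.SecretTypeTLS {
			tlsSecrets = append(tlsSecrets, s)
		}
		return nil
	})
	return tlsSecrets, err
}
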
diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go
index 1582e8ee7d..f176167dc8 100644
--- a/vendor/k8s.io/client-go/tools/record/event.go
+++ b/vendor/k8s.io/client-go/tools/record/event.go
@@ -274,7 +274,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
return true
case *errors.StatusError:
- if errors.IsAlreadyExists(err) {
+ if errors.IsAlreadyExists(err) || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
} else {
klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
diff --git a/vendor/k8s.io/client-go/tools/watch/retrywatcher.go b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go
index e4806d2ea1..d81dc43570 100644
--- a/vendor/k8s.io/client-go/tools/watch/retrywatcher.go
+++ b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go
@@ -24,10 +24,9 @@ import (
"net/http"
"time"
- "github.com/davecgh/go-spew/spew"
-
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/dump"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
@@ -191,7 +190,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
errObject := apierrors.FromObject(event.Object)
statusErr, ok := errObject.(*apierrors.StatusError)
if !ok {
- klog.Error(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object))
+ klog.Error(fmt.Sprintf("Received an error which is not *metav1.Status but %s", dump.Pretty(event.Object)))
// Retry unknown errors
return false, 0
}
@@ -220,7 +219,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
// Log here so we have a record of hitting the unexpected error
// and we can whitelist some error codes if we missed any that are expected.
- klog.V(5).Info(spew.Sprintf("Retrying after unexpected error: %#+v", event.Object))
+ klog.V(5).Info(fmt.Sprintf("Retrying after unexpected error: %s", dump.Pretty(event.Object)))
// Retry
return false, statusDelay
diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go
index edcc6d1d48..7c7f1b330f 100644
--- a/vendor/k8s.io/client-go/transport/cache.go
+++ b/vendor/k8s.io/client-go/transport/cache.go
@@ -27,6 +27,7 @@ import (
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/tools/metrics"
)
// TlsTransportCache caches TLS http.RoundTrippers different configurations. The
@@ -80,11 +81,16 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
// Ensure we only create a single transport for the given TLS options
c.mu.Lock()
defer c.mu.Unlock()
+ defer metrics.TransportCacheEntries.Observe(len(c.transports))
// See if we already have a custom transport for this config
if t, ok := c.transports[key]; ok {
+ metrics.TransportCreateCalls.Increment("hit")
return t, nil
}
+ metrics.TransportCreateCalls.Increment("miss")
+ } else {
+ metrics.TransportCreateCalls.Increment("uncacheable")
}
// Get the TLS options for this client config
diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go
index 98be932cb9..1c4cb5bf87 100644
--- a/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go
+++ b/vendor/k8s.io/kube-openapi/pkg/builder/openapi.go
@@ -152,7 +152,7 @@ func (o *openAPI) finalizeSwagger() (*spec.Swagger, error) {
}
}
- return o.swagger, nil
+ return deduplicateParameters(o.swagger)
}
func (o *openAPI) buildDefinitionRecursively(name string) error {
diff --git a/vendor/k8s.io/kube-openapi/pkg/builder/parameters.go b/vendor/k8s.io/kube-openapi/pkg/builder/parameters.go
new file mode 100644
index 0000000000..2bb8bd885d
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/builder/parameters.go
@@ -0,0 +1,259 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "hash/fnv"
+ "sort"
+ "strconv"
+ "strings"
+
+ "k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+// deduplicateParameters finds parameters that are shared across multiple endpoints and replaces them with
+// references to the shared parameters in order to avoid repetition.
+//
+// deduplicateParameters does not mutate the source.
+func deduplicateParameters(sp *spec.Swagger) (*spec.Swagger, error) {
+ names, parameters, err := collectSharedParameters(sp)
+ if err != nil {
+ return nil, err
+ }
+
+ if sp.Parameters != nil {
+ return nil, fmt.Errorf("shared parameters already exist") // should not happen with the builder, but to be sure
+ }
+
+ clone := *sp
+ clone.Parameters = parameters
+ return replaceSharedParameters(names, &clone)
+}
+
+// collectSharedParameters finds parameters that show up for many endpoints. These
+// are basically all parameters, with the exception of those that we know are
+// endpoint specific, e.g. because they reference the schema of the kind, or have
+// the kind or resource name in the description.
+func collectSharedParameters(sp *spec.Swagger) (namesByJSON map[string]string, ret map[string]spec.Parameter, err error) {
+ if sp == nil || sp.Paths == nil {
+ return nil, nil, nil
+ }
+
+ countsByJSON := map[string]int{}
+ shared := map[string]spec.Parameter{}
+ var keys []string
+
+ collect := func(p *spec.Parameter) error {
+ if (p.In == "query" || p.In == "path") && p.Name == "name" {
+ return nil // ignore name parameter as they are never shared with the Kind in the description
+ }
+ if p.In == "query" && p.Name == "fieldValidation" {
+ return nil // keep fieldValidation parameter unshared because kubectl uses it (until 1.27) to detect server-side field validation support
+ }
+ if p.In == "query" && p.Name == "dryRun" {
+ return nil // keep dryRun parameter unshared because kubectl uses it (until 1.26) to detect dry-run support
+ }
+ if p.Schema != nil && p.In == "body" && p.Name == "body" && !strings.HasPrefix(p.Schema.Ref.String(), "#/definitions/io.k8s.apimachinery") {
+ return nil // ignore non-generic body parameters as they reference the custom schema of the kind
+ }
+
+ bs, err := json.Marshal(p)
+ if err != nil {
+ return err
+ }
+
+ k := string(bs)
+ countsByJSON[k]++
+ if count := countsByJSON[k]; count == 1 {
+ shared[k] = *p
+ keys = append(keys, k)
+ }
+
+ return nil
+ }
+
+ for _, path := range sp.Paths.Paths {
+ // per operation parameters
+ for _, op := range operations(&path) {
+ if op == nil {
+ continue // shouldn't happen, but ignore if it does; tested through unit test
+ }
+ for _, p := range op.Parameters {
+ if p.Ref.String() != "" {
+ // shouldn't happen, but ignore if it does
+ continue
+ }
+ if err := collect(&p); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ // per path parameters
+ for _, p := range path.Parameters {
+ if p.Ref.String() != "" {
+ continue // shouldn't happen, but ignore if it does
+ }
+ if err := collect(&p); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ // name deterministically
+ sort.Strings(keys)
+ ret = map[string]spec.Parameter{}
+ namesByJSON = map[string]string{}
+ for _, k := range keys {
+ name := shared[k].Name
+ if name == "" {
+ // this should never happen as the name is a required field. But if it does, let's be safe.
+ name = "param"
+ }
+ name += "-" + base64Hash(k)
+ i := 0
+ for {
+ if _, ok := ret[name]; !ok {
+ ret[name] = shared[k]
+ namesByJSON[k] = name
+ break
+ }
+ i++ // only on hash conflict, unlikely with our few variants
+ name = shared[k].Name + "-" + strconv.Itoa(i)
+ }
+ }
+
+ return namesByJSON, ret, nil
+}
+
+func operations(path *spec.PathItem) []*spec.Operation {
+ return []*spec.Operation{path.Get, path.Put, path.Post, path.Delete, path.Options, path.Head, path.Patch}
+}
+
+func base64Hash(s string) string {
+ hash := fnv.New64()
+ hash.Write([]byte(s)) //nolint:errcheck
+ return base64.URLEncoding.EncodeToString(hash.Sum(make([]byte, 0, 8))[:6]) // 8 characters
+}
+
+func replaceSharedParameters(sharedParameterNamesByJSON map[string]string, sp *spec.Swagger) (*spec.Swagger, error) {
+ if sp == nil || sp.Paths == nil {
+ return sp, nil
+ }
+
+ ret := sp
+
+ firstPathChange := true
+ for k, path := range sp.Paths.Paths {
+ pathChanged := false
+
+ // per operation parameters
+ for _, op := range []**spec.Operation{&path.Get, &path.Put, &path.Post, &path.Delete, &path.Options, &path.Head, &path.Patch} {
+ if *op == nil {
+ continue
+ }
+
+ firstParamChange := true
+ for i := range (*op).Parameters {
+ p := (*op).Parameters[i]
+
+ if p.Ref.String() != "" {
+ // shouldn't happen, but be idempotent if it does
+ continue
+ }
+
+ bs, err := json.Marshal(p)
+ if err != nil {
+ return nil, err
+ }
+
+ if name, ok := sharedParameterNamesByJSON[string(bs)]; ok {
+ if firstParamChange {
+ orig := *op
+ *op = &spec.Operation{}
+ **op = *orig
+ (*op).Parameters = make([]spec.Parameter, len(orig.Parameters))
+ copy((*op).Parameters, orig.Parameters)
+ firstParamChange = false
+ }
+
+ (*op).Parameters[i] = spec.Parameter{
+ Refable: spec.Refable{
+ Ref: spec.MustCreateRef("#/parameters/" + name),
+ },
+ }
+ pathChanged = true
+ }
+ }
+ }
+
+ // per path parameters
+ firstParamChange := true
+ for i := range path.Parameters {
+ p := path.Parameters[i]
+
+ if p.Ref.String() != "" {
+ // shouldn't happen, but be idempotent if it does
+ continue
+ }
+
+ bs, err := json.Marshal(p)
+ if err != nil {
+ return nil, err
+ }
+
+ if name, ok := sharedParameterNamesByJSON[string(bs)]; ok {
+ if firstParamChange {
+ orig := path.Parameters
+ path.Parameters = make([]spec.Parameter, len(orig))
+ copy(path.Parameters, orig)
+ firstParamChange = false
+ }
+
+ path.Parameters[i] = spec.Parameter{
+ Refable: spec.Refable{
+ Ref: spec.MustCreateRef("#/parameters/" + name),
+ },
+ }
+ pathChanged = true
+ }
+ }
+
+ if pathChanged {
+ if firstPathChange {
+ clone := *sp
+ ret = &clone
+
+ pathsClone := *ret.Paths
+ ret.Paths = &pathsClone
+
+ ret.Paths.Paths = make(map[string]spec.PathItem, len(sp.Paths.Paths))
+ for k, v := range sp.Paths.Paths {
+ ret.Paths.Paths[k] = v
+ }
+
+ firstPathChange = false
+ }
+ ret.Paths.Paths[k] = path
+ }
+ }
+
+ return ret, nil
+}
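
For reference, the shared-parameter keys produced by collectSharedParameters take the form "<name>-<hash>", where the hash is the URL-safe base64 of the first six bytes of the FNV-64 digest of the parameter's JSON. A small standalone illustration of that scheme (not part of the builder package); endpoints are then rewritten to reference "#/parameters/<key>":

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"hash/fnv"

	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	p := spec.Parameter{ParamProps: spec.ParamProps{Name: "pretty", In: "query"}}
	bs, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}

	h := fnv.New64()
	h.Write(bs)
	suffix := base64.URLEncoding.EncodeToString(h.Sum(nil)[:6]) // 8 characters

	fmt.Printf("shared parameter key: %s-%s\n", p.Name, suffix)
}
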
diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go
index 16e34853af..76415b7830 100644
--- a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go
+++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go
@@ -19,6 +19,8 @@ limitations under the License.
// operations are not repeated unnecessarily. The operations can be
// created as a tree, and replaced dynamically as needed.
//
+// All the operations in this module are thread-safe.
+//
// # Dependencies and types of caches
//
// This package uses a source/transform/sink model of caches to build
@@ -34,12 +36,6 @@ limitations under the License.
// replaced with a new one, and saves the previous results in case an
// error pops-up.
//
-// # Atomicity
-//
-// Most of the operations are not atomic/thread-safe, except for
-// [Replaceable.Replace] which can be performed while the objects
-// are being read.
-//
// # Etags
//
// Etags in this library is a cache version identifier. It doesn't
@@ -54,6 +50,7 @@ package cached
import (
"fmt"
+ "sync"
"sync/atomic"
)
@@ -100,14 +97,6 @@ type Data[T any] interface {
Get() Result[T]
}
-// T is the source type, V is the destination type.
-type merger[K comparable, T, V any] struct {
- mergeFn func(map[K]Result[T]) Result[V]
- caches map[K]Data[T]
- cacheResults map[K]Result[T]
- result Result[V]
-}
-
// NewMerger creates a new merge cache, a cache that merges the result
// of other caches. The function only gets called if any of the
// dependencies has changed.
@@ -125,27 +114,89 @@ type merger[K comparable, T, V any] struct {
// function will remerge all the dependencies together everytime. Since
// the list of dependencies is constant, there is no way to save some
// partial merge information either.
+//
+// Also note that Golang map iteration order is not stable. If the mergeFn
+// depends on a stable iteration order, it will need to implement
+// its own sorting.
func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] {
- return &merger[K, T, V]{
+ listCaches := make([]Data[T], 0, len(caches))
+ // maps from index to key
+ indexes := make(map[int]K, len(caches))
+ i := 0
+ for k := range caches {
+ listCaches = append(listCaches, caches[k])
+ indexes[i] = k
+ i++
+ }
+
+ return NewListMerger(func(results []Result[T]) Result[V] {
+ if len(results) != len(indexes) {
+ panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes)))
+ }
+ m := make(map[K]Result[T], len(results))
+ for i := range results {
+ m[indexes[i]] = results[i]
+ }
+ return mergeFn(m)
+ }, listCaches)
+}
+
+type listMerger[T, V any] struct {
+ lock sync.Mutex
+ mergeFn func([]Result[T]) Result[V]
+ caches []Data[T]
+ cacheResults []Result[T]
+ result Result[V]
+}
+
+// NewListMerger creates a new merge cache that merges the results of
+// other caches in list form. The function only gets called if any of
+// the dependency has changed.
+//
+// The benefit of ListMerger over the basic Merger is that caches are
+// stored in an ordered list so the order of the cache will be
+// preserved in the order of the results passed to the mergeFn.
+//
+// If any of the dependencies returned an error before, or any of the
+// dependencies returned an error this time, or if the mergeFn failed
+// before, then the function is rerun.
+//
+// Note that this assumes there is no "partial" merge, the merge
+// function will remerge all the dependencies together every time. Since
+// the list of dependencies is constant, there is no way to save some
+// partial merge information either.
+func NewListMerger[T, V any](mergeFn func(results []Result[T]) Result[V], caches []Data[T]) Data[V] {
+ return &listMerger[T, V]{
mergeFn: mergeFn,
caches: caches,
}
}
-func (c *merger[K, T, V]) prepareResults() map[K]Result[T] {
- cacheResults := make(map[K]Result[T], len(c.caches))
- for key, cache := range c.caches {
- cacheResults[key] = cache.Get()
+func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] {
+ cacheResults := make([]Result[T], len(c.caches))
+ ch := make(chan struct {
+ int
+ Result[T]
+ }, len(c.caches))
+ for i := range c.caches {
+ go func(index int) {
+ ch <- struct {
+ int
+ Result[T]
+ }{
+ index,
+ c.caches[index].Get(),
+ }
+ }(i)
+ }
+ for i := 0; i < len(c.caches); i++ {
+ res := <-ch
+ cacheResults[res.int] = res.Result
}
return cacheResults
}
-// Rerun if:
-// - The last run resulted in an error
-// - Any of the dependency previously returned an error
-// - Any of the dependency just returned an error
-// - Any of the dependency's etag changed
-func (c *merger[K, T, V]) needsRunning(results map[K]Result[T]) bool {
+func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool {
if c.cacheResults == nil {
return true
}
@@ -155,12 +206,8 @@ func (c *merger[K, T, V]) needsRunning(results map[K]Result[T]) bool {
if len(results) != len(c.cacheResults) {
panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults)))
}
- for key, oldResult := range c.cacheResults {
- newResult, ok := results[key]
- if !ok {
- panic(fmt.Errorf("unknown cache entry: %v", key))
- }
-
+ for i, oldResult := range c.cacheResults {
+ newResult := results[i]
if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil {
return true
}
@@ -168,17 +215,17 @@ func (c *merger[K, T, V]) needsRunning(results map[K]Result[T]) bool {
return false
}
-func (c *merger[K, T, V]) Get() Result[V] {
- cacheResults := c.prepareResults()
- if c.needsRunning(cacheResults) {
+func (c *listMerger[T, V]) Get() Result[V] {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ cacheResults := c.prepareResultsLocked()
+ if c.needsRunningLocked(cacheResults) {
c.cacheResults = cacheResults
c.result = c.mergeFn(c.cacheResults)
}
return c.result
}
-type transformerCacheKeyType struct{}
-
// NewTransformer creates a new cache that transforms the result of
// another cache. The transformFn will only be called if the source
// cache has updated the output, otherwise, the cached result will be
@@ -188,20 +235,17 @@ type transformerCacheKeyType struct{}
// this time, or if the transformerFn failed before, the function is
// rerun.
func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] {
- return NewMerger(func(caches map[transformerCacheKeyType]Result[T]) Result[V] {
- cache, ok := caches[transformerCacheKeyType{}]
- if len(caches) != 1 || !ok {
+ return NewListMerger(func(caches []Result[T]) Result[V] {
+ if len(caches) != 1 {
panic(fmt.Errorf("invalid cache for transformer cache: %v", caches))
}
- return transformerFn(cache)
- }, map[transformerCacheKeyType]Data[T]{
- {}: source,
- })
+ return transformerFn(caches[0])
+ }, []Data[T]{source})
}
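A short, hypothetical sketch of NewTransformer in the same assumed example file: the transform is just a single-dependency merge, so it reruns only when the source's etag changes or an error is involved.

func ExampleNewTransformer() {
    src := cached.NewSource(func() cached.Result[int] { return cached.Result[int]{Data: 21, Etag: "v1"} })
    doubled := cached.NewTransformer(func(r cached.Result[int]) cached.Result[int] {
        if r.Err != nil {
            return r
        }
        return cached.Result[int]{Data: r.Data * 2, Etag: r.Etag}
    }, src)
    fmt.Println(doubled.Get().Data)
    // Output: 42
}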
// NewSource creates a new cache that generates some data. This
// will always be called since we don't know the origin of the data and
-// if it needs to be updated or not.
+// if it needs to be updated or not. sourceFn MUST be thread-safe.
func NewSource[T any](sourceFn func() Result[T]) Data[T] {
c := source[T](sourceFn)
return &c
@@ -222,25 +266,24 @@ func NewStaticSource[T any](staticFn func() Result[T]) Data[T] {
}
type static[T any] struct {
+ once sync.Once
fn func() Result[T]
- result *Result[T]
+ result Result[T]
}
func (c *static[T]) Get() Result[T] {
- if c.result == nil {
- result := c.fn()
- c.result = &result
- }
- return *c.result
+ c.once.Do(func() {
+ c.result = c.fn()
+ })
+ return c.result
}
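The sync.Once change above makes the difference between NewSource and NewStaticSource easy to demonstrate. A hypothetical sketch in the same assumed example file: the static source's function runs exactly once, no matter how often Get is called.

func ExampleNewStaticSource() {
    calls := 0
    static := cached.NewStaticSource(func() cached.Result[int] {
        calls++ // only safe because NewStaticSource guarantees a single invocation
        return cached.Result[int]{Data: calls, Etag: "static"}
    })
    static.Get()
    static.Get()
    fmt.Println(calls)
    // Output: 1
}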
-// Replaceable is a cache that carries the result even when the
-// cache is replaced. The cache can be replaced atomically (without any
-// lock held). This is the type that should typically be stored in
+// Replaceable is a cache that carries the result even when the cache is
+// replaced. This is the type that should typically be stored in
// structs.
type Replaceable[T any] struct {
cache atomic.Pointer[Data[T]]
- result *Result[T]
+ result atomic.Pointer[Result[T]]
}
// Get retrieves the data from the underlying source. [Replaceable]
@@ -251,14 +294,19 @@ type Replaceable[T any] struct {
// failure is returned.
func (c *Replaceable[T]) Get() Result[T] {
result := (*c.cache.Load()).Get()
- if result.Err != nil && c.result != nil && c.result.Err == nil {
- return *c.result
+
+ for {
+ cResult := c.result.Load()
+ if result.Err != nil && cResult != nil && cResult.Err == nil {
+ return *cResult
+ }
+ if c.result.CompareAndSwap(cResult, &result) {
+ return result
+ }
}
- c.result = &result
- return *c.result
}
-// Replace changes the cache in a thread-safe way.
+// Replace changes the cache.
func (c *Replaceable[T]) Replace(cache Data[T]) {
c.cache.Swap(&cache)
}
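To illustrate the lock-free fallback behaviour above, a hypothetical sketch in the same assumed example file: once a replacement cache starts failing, Get keeps returning the last successful result.

func ExampleReplaceable() {
    var r cached.Replaceable[int]
    r.Replace(cached.NewSource(func() cached.Result[int] { return cached.Result[int]{Data: 1, Etag: "ok"} }))
    fmt.Println(r.Get().Data)
    r.Replace(cached.NewSource(func() cached.Result[int] { return cached.Result[int]{Err: fmt.Errorf("boom")} }))
    fmt.Println(r.Get().Data) // still the last good value
    // Output:
    // 1
    // 1
}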
diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go
index 37cb96f1be..0eb3f2360d 100644
--- a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go
+++ b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go
@@ -22,13 +22,12 @@ import (
"fmt"
"net/http"
"strconv"
- "sync"
"time"
"github.com/NYTimes/gziphandler"
"github.com/emicklei/go-restful/v3"
"github.com/golang/protobuf/proto"
- openapi_v2 "github.com/google/gnostic/openapiv2"
+ openapi_v2 "github.com/google/gnostic-models/openapiv2"
"github.com/google/uuid"
"github.com/munnerz/goautoneg"
klog "k8s.io/klog/v2"
@@ -119,16 +118,14 @@ func ToProtoBinary(json []byte) ([]byte, error) {
// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec.
//
// Deprecated: use OpenAPIService.RegisterOpenAPIVersionedService instead.
-func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) (*OpenAPIService, error) {
+func RegisterOpenAPIVersionedService(spec *spec.Swagger, servePath string, handler common.PathHandler) *OpenAPIService {
o := NewOpenAPIService(spec)
- return o, o.RegisterOpenAPIVersionedService(servePath, handler)
+ o.RegisterOpenAPIVersionedService(servePath, handler)
+ return o
}
// RegisterOpenAPIVersionedService registers a handler to provide access to provided swagger spec.
-func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) error {
- // Mutex protects the cache chain
- var mutex sync.Mutex
-
+func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handler common.PathHandler) {
accepted := []struct {
Type string
SubType string
@@ -157,9 +154,7 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
continue
}
// serve the first matching media type in the sorted clause list
- mutex.Lock()
result := accepts.GetDataAndEtag.Get()
- mutex.Unlock()
if result.Err != nil {
klog.Errorf("Error in OpenAPI handler: %s", result.Err)
// only return a 503 if we have no older cache data to serve
@@ -183,8 +178,6 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
return
}),
))
-
- return nil
}
// BuildAndRegisterOpenAPIVersionedService builds the spec and registers a handler to provide access to it.
@@ -203,5 +196,6 @@ func BuildAndRegisterOpenAPIVersionedServiceFromRoutes(servePath string, routeCo
return nil, err
}
o := NewOpenAPIService(spec)
- return o, o.RegisterOpenAPIVersionedService(servePath, handler)
+ o.RegisterOpenAPIVersionedService(servePath, handler)
+ return o, nil
}
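For callers, the visible effect of this handler change is the dropped error return. A hypothetical caller-side sketch (not from this patch); the serve path and the use of http.ServeMux as the PathHandler are illustrative assumptions.

package main

import (
    "net/http"

    "k8s.io/kube-openapi/pkg/handler"
    "k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
    mux := http.NewServeMux()
    svc := handler.NewOpenAPIService(&spec.Swagger{})
    // Before this change: if err := svc.RegisterOpenAPIVersionedService("/openapi/v2", mux); err != nil { ... }
    svc.RegisterOpenAPIVersionedService("/openapi/v2", mux)
    _ = http.ListenAndServe(":8080", mux)
}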
diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
index 66b7a68da6..2263e2f32b 100644
--- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
+++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
@@ -30,7 +30,7 @@ import (
"time"
"github.com/golang/protobuf/proto"
- openapi_v3 "github.com/google/gnostic/openapiv3"
+ openapi_v3 "github.com/google/gnostic-models/openapiv3"
"github.com/google/uuid"
"github.com/munnerz/goautoneg"
"k8s.io/klog/v2"
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
index 763923dfff..5789e67ab7 100644
--- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
@@ -21,7 +21,7 @@ import (
"sort"
"strings"
- openapi_v2 "github.com/google/gnostic/openapiv2"
+ openapi_v2 "github.com/google/gnostic-models/openapiv2"
"gopkg.in/yaml.v2"
)
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go
index 519dcf2eba..d9f2896e35 100644
--- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go
@@ -21,7 +21,7 @@ import (
"reflect"
"strings"
- openapi_v3 "github.com/google/gnostic/openapiv3"
+ openapi_v3 "github.com/google/gnostic-models/openapiv3"
"gopkg.in/yaml.v3"
)
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go
index 406a09d9d1..6a77f2ac82 100644
--- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go
@@ -21,7 +21,7 @@ import (
"strconv"
"github.com/go-openapi/jsonreference"
- openapi_v2 "github.com/google/gnostic/openapiv2"
+ openapi_v2 "github.com/google/gnostic-models/openapiv2"
)
// Interfaces
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go
index 75c50053b1..c85067a263 100644
--- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go
@@ -16,13 +16,10 @@ package strfmt
import (
"encoding"
- "fmt"
"reflect"
"strings"
"sync"
- "time"
- "github.com/mitchellh/mapstructure"
"k8s.io/kube-openapi/pkg/validation/errors"
)
@@ -50,7 +47,6 @@ type Registry interface {
ContainsName(string) bool
Validates(string, string) bool
Parse(string, string) (interface{}, error)
- MapStructureHookFunc() mapstructure.DecodeHookFunc
}
type knownFormat struct {
@@ -92,83 +88,6 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
}
}
-// MapStructureHookFunc is a decode hook function for mapstructure
-func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
- return func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
- if from.Kind() != reflect.String {
- return data, nil
- }
- for _, v := range f.data {
- tpe, _ := f.GetType(v.Name)
- if to == tpe {
- switch v.Name {
- case "date":
- d, err := time.Parse(RFC3339FullDate, data.(string))
- if err != nil {
- return nil, err
- }
- return Date(d), nil
- case "datetime":
- input := data.(string)
- if len(input) == 0 {
- return nil, fmt.Errorf("empty string is an invalid datetime format")
- }
- return ParseDateTime(input)
- case "duration":
- dur, err := ParseDuration(data.(string))
- if err != nil {
- return nil, err
- }
- return Duration(dur), nil
- case "uri":
- return URI(data.(string)), nil
- case "email":
- return Email(data.(string)), nil
- case "uuid":
- return UUID(data.(string)), nil
- case "uuid3":
- return UUID3(data.(string)), nil
- case "uuid4":
- return UUID4(data.(string)), nil
- case "uuid5":
- return UUID5(data.(string)), nil
- case "hostname":
- return Hostname(data.(string)), nil
- case "ipv4":
- return IPv4(data.(string)), nil
- case "ipv6":
- return IPv6(data.(string)), nil
- case "cidr":
- return CIDR(data.(string)), nil
- case "mac":
- return MAC(data.(string)), nil
- case "isbn":
- return ISBN(data.(string)), nil
- case "isbn10":
- return ISBN10(data.(string)), nil
- case "isbn13":
- return ISBN13(data.(string)), nil
- case "creditcard":
- return CreditCard(data.(string)), nil
- case "ssn":
- return SSN(data.(string)), nil
- case "hexcolor":
- return HexColor(data.(string)), nil
- case "rgbcolor":
- return RGBColor(data.(string)), nil
- case "byte":
- return Base64(data.(string)), nil
- case "password":
- return Password(data.(string)), nil
- default:
- return nil, errors.InvalidTypeName(v.Name)
- }
- }
- }
- return data, nil
- }
-}
-
// Add adds a new format, return true if this was a new item instead of a replacement
func (f *defaultFormats) Add(name string, strfmt Format, validator Validator) bool {
f.Lock()
diff --git a/vendor/k8s.io/utils/nsenter/nsenter.go b/vendor/k8s.io/utils/nsenter/nsenter.go
index 237b636bce..6f847db878 100644
--- a/vendor/k8s.io/utils/nsenter/nsenter.go
+++ b/vendor/k8s.io/utils/nsenter/nsenter.go
@@ -49,26 +49,28 @@ type Nsenter = NSEnter
//
// NSEnter requires:
//
-// 1. Docker >= 1.6 due to the dependency on the slave propagation mode
+// 1. Docker >= 1.6 due to the dependency on the slave propagation mode
// of the bind-mount of the kubelet root directory in the container.
// Docker 1.5 used a private propagation mode for bind-mounts, so mounts
// performed in the host's mount namespace do not propagate out to the
// bind-mount in this docker version.
-// 2. The host's root filesystem must be available at /rootfs
-// 3. The nsenter binary must be on the Kubelet process' PATH in the container's
+// 2. The host's root filesystem must be available at /rootfs
+// 3. The nsenter binary must be on the Kubelet process' PATH in the container's
// filesystem.
-// 4. The Kubelet process must have CAP_SYS_ADMIN (required by nsenter); at
+// 4. The Kubelet process must have CAP_SYS_ADMIN (required by nsenter); at
// the present, this effectively means that the kubelet is running in a
// privileged container.
-// 5. The volume path used by the Kubelet must be the same inside and outside
+// 5. The volume path used by the Kubelet must be the same inside and outside
// the container and be writable by the container (to initialize volume)
// contents. TODO: remove this requirement.
-// 6. The host image must have "mount", "findmnt", "umount", "stat", "touch",
+// 6. The host image must have "mount", "findmnt", "umount", "stat", "touch",
// "mkdir", "ls", "sh" and "chmod" binaries in /bin, /usr/sbin, or /usr/bin
-// 7. The host image should have systemd-run in /bin, /usr/sbin, or /usr/bin if
+// 7. The host image should have systemd-run in /bin, /usr/sbin, or /usr/bin if
// systemd is installed/enabled in the operating system.
+//
// For more information about mount propagation modes, see:
-// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
+//
+// https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
type NSEnter struct {
// a map of commands to their paths on the host filesystem
paths map[string]string
@@ -174,10 +176,13 @@ func (ne *NSEnter) SupportsSystemd() (string, bool) {
// exist. When it's false, it evaluates symlinks of the existing part and
// blindly adds the non-existing part:
// pathname: /mnt/volume/non/existing/directory
-// /mnt/volume exists
-// non/existing/directory does not exist
+//
+// /mnt/volume exists
+// non/existing/directory does not exist
+//
// -> It resolves symlinks in /mnt/volume to say /mnt/foo and returns
-// /mnt/foo/non/existing/directory.
+//
+// /mnt/foo/non/existing/directory.
//
// BEWARE! EvalSymlinks is not able to detect symlink loops with mustExist=false!
// If /tmp/link is symlink to /tmp/link, EvalSymlinks(/tmp/link/foo) returns /tmp/link/foo.
diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go
index a0b07a6d78..187eb5d8c5 100644
--- a/vendor/k8s.io/utils/trace/trace.go
+++ b/vendor/k8s.io/utils/trace/trace.go
@@ -65,6 +65,11 @@ func durationToMilliseconds(timeDuration time.Duration) int64 {
}
type traceItem interface {
+ // rLock must be called before invoking time or writeItem.
+ rLock()
+ // rUnlock must be called after processing the item is complete.
+ rUnlock()
+
// time returns when the trace was recorded as completed.
time() time.Time
// writeItem outputs the traceItem to the buffer. If stepThreshold is non-nil, only output the
@@ -79,6 +84,10 @@ type traceStep struct {
fields []Field
}
+// rLock doesn't need to do anything because traceStep instances are immutable.
+func (s traceStep) rLock() {}
+func (s traceStep) rUnlock() {}
+
func (s traceStep) time() time.Time {
return s.stepTime
}
@@ -106,6 +115,14 @@ type Trace struct {
traceItems []traceItem
}
+func (t *Trace) rLock() {
+ t.lock.RLock()
+}
+
+func (t *Trace) rUnlock() {
+ t.lock.RUnlock()
+}
+
func (t *Trace) time() time.Time {
if t.endTime != nil {
return *t.endTime
@@ -231,8 +248,10 @@ func (t *Trace) logTrace() {
func (t *Trace) writeTraceSteps(b *bytes.Buffer, formatter string, stepThreshold *time.Duration) {
lastStepTime := t.startTime
for _, stepOrTrace := range t.traceItems {
+ stepOrTrace.rLock()
stepOrTrace.writeItem(b, formatter, lastStepTime, stepThreshold)
lastStepTime = stepOrTrace.time()
+ stepOrTrace.rUnlock()
}
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8eeb8a0a7b..fdfa584de7 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -2,16 +2,18 @@
## explicit
github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2022-08-01/compute
github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2021-10-01/containerservice
-github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network
github.com/Azure/azure-sdk-for-go/services/network/mgmt/2022-07-01/network
github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns
github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources
-github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources
github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-09-01/storage
github.com/Azure/azure-sdk-for-go/version
-# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0
+# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0
## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/azcore
+github.com/Azure/azure-sdk-for-go/sdk/azcore/arm
+github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource
+github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy
+github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime
github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud
github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported
github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log
@@ -25,17 +27,35 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/log
github.com/Azure/azure-sdk-for-go/sdk/azcore/policy
github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime
github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming
+github.com/Azure/azure-sdk-for-go/sdk/azcore/to
github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing
-# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
+# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0
## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/azidentity
-# github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1
+# github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0
## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/internal/diag
github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo
+github.com/Azure/azure-sdk-for-go/sdk/internal/exported
github.com/Azure/azure-sdk-for-go/sdk/internal/log
+github.com/Azure/azure-sdk-for-go/sdk/internal/poller
github.com/Azure/azure-sdk-for-go/sdk/internal/temporal
github.com/Azure/azure-sdk-for-go/sdk/internal/uuid
+# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.2.0
+## explicit; go 1.18
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5
+# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.3.0
+## explicit; go 1.18
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4
+# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.2.0
+## explicit; go 1.18
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4
+# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns v1.1.0
+## explicit; go 1.18
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns
+# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1
+## explicit; go 1.18
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources
# github.com/Azure/go-autorest v14.2.0+incompatible
## explicit
github.com/Azure/go-autorest
@@ -64,8 +84,8 @@ github.com/Azure/go-autorest/logger
# github.com/Azure/go-autorest/tracing v0.6.0
## explicit; go 1.12
github.com/Azure/go-autorest/tracing
-# github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0
-## explicit; go 1.17
+# github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1
+## explicit; go 1.18
github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache
github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential
github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors
@@ -83,6 +103,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/o
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared
github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version
github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
@@ -93,9 +114,9 @@ github.com/Microsoft/go-winio/pkg/guid
# github.com/NYTimes/gziphandler v1.1.1
## explicit; go 1.11
github.com/NYTimes/gziphandler
-# github.com/antlr/antlr4/runtime/Go/antlr v1.4.10
-## explicit; go 1.16
-github.com/antlr/antlr4/runtime/Go/antlr
+# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df
+## explicit; go 1.18
+github.com/antlr/antlr4/runtime/Go/antlr/v4
# github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
## explicit
github.com/asaskevich/govalidator
@@ -111,7 +132,7 @@ github.com/cenkalti/backoff/v4
# github.com/cespare/xxhash/v2 v2.2.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
-# github.com/container-storage-interface/spec v1.7.0
+# github.com/container-storage-interface/spec v1.8.0
## explicit; go 1.18
github.com/container-storage-interface/spec/lib/go/csi
# github.com/coreos/go-semver v0.3.0
@@ -128,7 +149,7 @@ github.com/davecgh/go-spew/spew
## explicit
github.com/docker/distribution/digestset
github.com/docker/distribution/reference
-# github.com/emicklei/go-restful/v3 v3.9.0
+# github.com/emicklei/go-restful/v3 v3.10.2
## explicit; go 1.13
github.com/emicklei/go-restful/v3
github.com/emicklei/go-restful/v3/log
@@ -151,11 +172,11 @@ github.com/go-logr/stdr
# github.com/go-openapi/jsonpointer v0.19.6
## explicit; go 1.13
github.com/go-openapi/jsonpointer
-# github.com/go-openapi/jsonreference v0.20.1
+# github.com/go-openapi/jsonreference v0.20.2
## explicit; go 1.13
github.com/go-openapi/jsonreference
github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/swag v0.22.3
+# github.com/go-openapi/swag v0.22.4
## explicit; go 1.18
github.com/go-openapi/swag
# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
@@ -170,6 +191,9 @@ github.com/gogo/protobuf/sortkeys
# github.com/golang-jwt/jwt/v4 v4.5.0
## explicit; go 1.16
github.com/golang-jwt/jwt/v4
+# github.com/golang-jwt/jwt/v5 v5.0.0
+## explicit; go 1.18
+github.com/golang-jwt/jwt/v5
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
## explicit
github.com/golang/groupcache/lru
@@ -187,8 +211,8 @@ github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
-# github.com/google/cel-go v0.12.7
-## explicit; go 1.17
+# github.com/google/cel-go v0.16.1
+## explicit; go 1.18
github.com/google/cel-go/cel
github.com/google/cel-go/checker
github.com/google/cel-go/checker/decls
@@ -207,13 +231,13 @@ github.com/google/cel-go/interpreter
github.com/google/cel-go/interpreter/functions
github.com/google/cel-go/parser
github.com/google/cel-go/parser/gen
-# github.com/google/gnostic v0.5.7-v3refs
-## explicit; go 1.12
-github.com/google/gnostic/compiler
-github.com/google/gnostic/extensions
-github.com/google/gnostic/jsonschema
-github.com/google/gnostic/openapiv2
-github.com/google/gnostic/openapiv3
+# github.com/google/gnostic-models v0.6.8
+## explicit; go 1.18
+github.com/google/gnostic-models/compiler
+github.com/google/gnostic-models/extensions
+github.com/google/gnostic-models/jsonschema
+github.com/google/gnostic-models/openapiv2
+github.com/google/gnostic-models/openapiv3
# github.com/google/go-cmp v0.6.0
## explicit; go 1.13
github.com/google/go-cmp/cmp
@@ -225,8 +249,8 @@ github.com/google/go-cmp/cmp/internal/value
## explicit; go 1.12
github.com/google/gofuzz
github.com/google/gofuzz/bytesource
-# github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1
-## explicit; go 1.14
+# github.com/google/pprof v0.0.0-20230602010524-ada837c32108
+## explicit; go 1.19
github.com/google/pprof/profile
# github.com/google/uuid v1.3.1
## explicit
@@ -245,19 +269,16 @@ github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
-# github.com/jongio/azidext/go/azidext v0.4.0
-## explicit; go 1.18
-github.com/jongio/azidext/go/azidext
# github.com/josharian/intern v1.0.0
## explicit; go 1.5
github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/kubernetes-csi/csi-lib-utils v0.13.0
+# github.com/kubernetes-csi/csi-lib-utils v0.15.0
## explicit; go 1.18
github.com/kubernetes-csi/csi-lib-utils/protosanitizer
-# github.com/kubernetes-csi/csi-proxy/client v1.0.1
+# github.com/kubernetes-csi/csi-proxy/client v1.1.3
## explicit; go 1.16
github.com/kubernetes-csi/csi-proxy/client
github.com/kubernetes-csi/csi-proxy/client/api/disk/v1
@@ -273,7 +294,7 @@ github.com/kubernetes-csi/csi-proxy/client/groups/filesystem/v1
github.com/kubernetes-csi/csi-proxy/client/groups/filesystem/v1beta1
github.com/kubernetes-csi/csi-proxy/client/groups/volume/v1
github.com/kubernetes-csi/csi-proxy/client/groups/volume/v1beta2
-# github.com/kubernetes-csi/external-snapshotter/client/v4 v4.1.0
+# github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
## explicit; go 1.15
github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1
github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1
@@ -290,12 +311,9 @@ github.com/kylelemons/godebug/pretty
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
-# github.com/matttproud/golang_protobuf_extensions v1.0.2
+# github.com/matttproud/golang_protobuf_extensions v1.0.4
## explicit; go 1.9
github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/mitchellh/mapstructure v1.4.1
-## explicit; go 1.14
-github.com/mitchellh/mapstructure
# github.com/moby/spdystream v0.2.0
## explicit; go 1.13
github.com/moby/spdystream
@@ -312,7 +330,7 @@ github.com/modern-go/reflect2
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
-# github.com/onsi/ginkgo/v2 v2.11.0
+# github.com/onsi/ginkgo/v2 v2.13.0
## explicit; go 1.18
github.com/onsi/ginkgo/v2
github.com/onsi/ginkgo/v2/config
@@ -334,7 +352,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support
github.com/onsi/ginkgo/v2/internal/testingtproxy
github.com/onsi/ginkgo/v2/reporters
github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.27.8
+# github.com/onsi/gomega v1.28.0
## explicit; go 1.18
github.com/onsi/gomega
github.com/onsi/gomega/format
@@ -356,10 +374,10 @@ github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalk
github.com/opencontainers/selinux/pkg/pwalkdir
-# github.com/pborman/uuid v1.2.0
+# github.com/pborman/uuid v1.2.1
## explicit
github.com/pborman/uuid
-# github.com/pelletier/go-toml v1.9.4
+# github.com/pelletier/go-toml v1.9.5
## explicit; go 1.12
github.com/pelletier/go-toml
# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8
@@ -371,7 +389,7 @@ github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.14.0
+# github.com/prometheus/client_golang v1.16.0
## explicit; go 1.17
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/collectors
@@ -379,16 +397,16 @@ github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/testutil
github.com/prometheus/client_golang/prometheus/testutil/promlint
-# github.com/prometheus/client_model v0.3.0
-## explicit; go 1.9
+# github.com/prometheus/client_model v0.4.0
+## explicit; go 1.18
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.37.0
-## explicit; go 1.16
+# github.com/prometheus/common v0.44.0
+## explicit; go 1.18
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.8.0
-## explicit; go 1.17
+# github.com/prometheus/procfs v0.10.1
+## explicit; go 1.19
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@@ -515,6 +533,10 @@ golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
+# golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e
+## explicit; go 1.18
+golang.org/x/exp/constraints
+golang.org/x/exp/slices
# golang.org/x/net v0.24.0
## explicit; go 1.18
golang.org/x/net/context
@@ -536,6 +558,7 @@ golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.5.0
## explicit; go 1.18
+golang.org/x/sync/errgroup
golang.org/x/sync/singleflight
# golang.org/x/sys v0.19.0
## explicit; go 1.18
@@ -559,11 +582,19 @@ golang.org/x/text/encoding/korean
golang.org/x/text/encoding/simplifiedchinese
golang.org/x/text/encoding/traditionalchinese
golang.org/x/text/encoding/unicode
+golang.org/x/text/feature/plural
+golang.org/x/text/internal
+golang.org/x/text/internal/catmsg
+golang.org/x/text/internal/format
golang.org/x/text/internal/language
golang.org/x/text/internal/language/compact
+golang.org/x/text/internal/number
+golang.org/x/text/internal/stringset
golang.org/x/text/internal/tag
golang.org/x/text/internal/utf8internal
golang.org/x/text/language
+golang.org/x/text/message
+golang.org/x/text/message/catalog
golang.org/x/text/runes
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
@@ -705,7 +736,7 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# k8s.io/api v0.27.13 => k8s.io/api v0.27.13
+# k8s.io/api v0.28.0 => k8s.io/api v0.27.13
## explicit; go 1.20
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
@@ -765,7 +796,7 @@ k8s.io/api/storage/v1beta1
## explicit; go 1.20
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
-# k8s.io/apimachinery v0.27.13 => k8s.io/apimachinery v0.27.13
+# k8s.io/apimachinery v0.28.2 => k8s.io/apimachinery v0.28.2
## explicit; go 1.20
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@@ -797,6 +828,7 @@ k8s.io/apimachinery/pkg/selection
k8s.io/apimachinery/pkg/types
k8s.io/apimachinery/pkg/util/cache
k8s.io/apimachinery/pkg/util/diff
+k8s.io/apimachinery/pkg/util/dump
k8s.io/apimachinery/pkg/util/errors
k8s.io/apimachinery/pkg/util/framer
k8s.io/apimachinery/pkg/util/httpstream
@@ -967,7 +999,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate
k8s.io/apiserver/plugin/pkg/audit/webhook
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/client-go v0.27.13
+# k8s.io/client-go v0.28.0
## explicit; go 1.20
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -1252,7 +1284,7 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
-# k8s.io/component-base v0.27.13 => k8s.io/component-base v0.27.13
+# k8s.io/component-base v0.28.0 => k8s.io/component-base v0.27.13
## explicit; go 1.20
k8s.io/component-base/cli/flag
k8s.io/component-base/config
@@ -1307,7 +1339,7 @@ k8s.io/kms/apis/v1beta1
k8s.io/kms/apis/v2
k8s.io/kms/pkg/service
k8s.io/kms/pkg/util
-# k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f
+# k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9
## explicit; go 1.19
k8s.io/kube-openapi/pkg/builder
k8s.io/kube-openapi/pkg/builder3
@@ -1409,7 +1441,7 @@ k8s.io/mount-utils
## explicit; go 1.20
k8s.io/pod-security-admission/api
k8s.io/pod-security-admission/policy
-# k8s.io/utils v0.0.0-20230209194617-a36077c30491
+# k8s.io/utils v0.0.0-20230505201702-9f6742963106
## explicit; go 1.18
k8s.io/utils/buffer
k8s.io/utils/clock
@@ -1487,6 +1519,36 @@ sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine
sigs.k8s.io/cloud-provider-azure/pkg/retry
sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy
sigs.k8s.io/cloud-provider-azure/pkg/version
+# sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231012171618-1890d8703623
+## explicit; go 1.20
+sigs.k8s.io/cloud-provider-azure/pkg/azclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryrepectthrottled
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient
+sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient
# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
## explicit; go 1.18
sigs.k8s.io/json
@@ -1505,7 +1567,7 @@ sigs.k8s.io/yaml
# go.etcd.io/etcd => go.etcd.io/etcd v0.0.0-20200410171415-59f5fb25a533
# k8s.io/api => k8s.io/api v0.27.13
# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.13
-# k8s.io/apimachinery => k8s.io/apimachinery v0.27.13
+# k8s.io/apimachinery => k8s.io/apimachinery v0.28.2
# k8s.io/apiserver => k8s.io/apiserver v0.27.13
# k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.13
# k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.13
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/LICENSE b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/Makefile b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/Makefile
new file mode 100644
index 0000000000..74aec35680
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/Makefile
@@ -0,0 +1,138 @@
+# Copyright 2022 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
+ifeq (,$(shell go env GOBIN))
+GOBIN=$(shell go env GOPATH)/bin
+else
+GOBIN=$(shell go env GOBIN)
+endif
+
+## Location to install dependencies to
+LOCALBIN ?= $(shell pwd)/bin
+$(LOCALBIN):
+ mkdir -p $(LOCALBIN)
+
+# Setting SHELL to bash allows bash commands to be executed by recipes.
+# Options are set to exit when a recipe line exits non-zero or a piped command fails.
+SHELL = /usr/bin/env bash -o pipefail
+.SHELLFLAGS = -ec
+
+.PHONY: all
+all: generate
+
+##@ General
+
+# The help target prints out all targets with their descriptions organized
+# beneath their categories. The categories are represented by '##@' and the
+# target descriptions by '##'. The awk command is responsible for reading the
+# entire set of makefiles included in this invocation, looking for lines of the
+# form xyz: ## something, and then pretty-formatting the target and help. Then,
+# if there's a line with ##@ something, that gets pretty-printed as a category.
+# More info on the usage of ANSI control characters for terminal formatting:
+# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
+# More info on the awk command:
+# http://linuxcommand.org/lc3_adv_awk.php
+
+.PHONY: help
+help: ## Display this help.
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+##@ Development
+.PHONY: fmt
+fmt: goimports ## Run go fmt against code.
+ $(GOIMPORTS) -w -local sigs.k8s.io/cloud-provider-azure/pkg/azclient .
+
+.PHONY: vet
+vet: golangci-lint ## Run golangci-lint against the client-gen code.
+ pushd client-gen; $(LOCALBIN)/golangci-lint run --timeout 10m ./... ;popd
+
+##@ Build
+.PHONY: build
+TYPESCAFFOLD = $(LOCALBIN)/typescaffold
+CLIENTGEN = $(LOCALBIN)/client-gen
+build: fmt vet ## Build manager binary.
+ pushd client-gen; CGO_ENABLED=0 go build -o ../bin/client-gen ./cmd/client-gen/ ;popd
+ pushd client-gen; CGO_ENABLED=0 go build -o ../bin/typescaffold ./cmd/typescaffold/;popd
+
+.PHONY: generate
+generate: install-dependencies build generatecode generateimpl vet-all
+
+.PHONY: generatecode
+generatecode: build ## Generate client scaffolding
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 --package-alias armcontainerservice --resource ManagedCluster --client-name ManagedClustersClient --ratelimitkey containerServiceRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources --package-alias resources --resource Deployment --client-name DeploymentsClient --verbs delete --ratelimitkey deploymentRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources --package-alias resources --resource ResourceGroup --client-name ResourceGroupsClient
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 --package-alias armcompute --resource Disk --client-name DisksClient --ratelimitkey diskRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 --package-alias armcompute --resource AvailabilitySet --client-name AvailabilitySetsClient --verbs get,list --ratelimitkey availabilitySetRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 --package-alias armcompute --resource VirtualMachine --client-name VirtualMachinesClient --verbs createorupdate,delete,list --expand --ratelimitkey virtualMachineRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 --package-alias armcompute --resource VirtualMachineScaleSet --client-name VirtualMachineScaleSetsClient --verbs get,createorupdate,delete,list --ratelimitkey virtualMachineSizesRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 --package-alias armcompute --resource VirtualMachineScaleSet --subresource VirtualMachineScaleSetVM --client-name VirtualMachineScaleSetVMsClient --verbs get,delete,list
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 --package-alias armcompute --resource Snapshot --client-name SnapshotsClient --verbs get,createorupdate,delete --ratelimitkey snapshotRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 --package-alias armcompute --resource SSHPublicKeyResource --client-name SSHPublicKeysClient --verbs get,listbyrg
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource VirtualNetwork --subresource Subnet --client-name SubnetsClient --verbs get,createorupdate,delete,list --expand --ratelimitkey subnetsRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource VirtualNetwork --client-name VirtualNetworksClient --verbs get,createorupdate,delete,list --expand
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource Interface --client-name InterfacesClient --verbs get,createorupdate,delete,list --expand --ratelimitkey interfaceRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource LoadBalancer --client-name LoadBalancersClient --verbs get,createorupdate,delete,list --expand --ratelimitkey loadBalancerRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource PrivateEndpoint --client-name PrivateEndpointsClient --verbs get,createorupdate --expand --ratelimitkey privateEndpointRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource PublicIPAddress --client-name PublicIPAddressesClient --verbs get,createorupdate,delete,list --expand --ratelimitkey publicIPAddressRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource PublicIPPrefix --client-name PublicIPPrefixesClient --verbs get,createorupdate,delete,list --expand
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource RouteTable --client-name RouteTablesClient --verbs createorupdate,delete --ratelimitkey routeTableRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource SecurityGroup --client-name SecurityGroupsClient --verbs get,createorupdate,delete,list --ratelimitkey securityGroupRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource PrivateLinkService --client-name PrivateLinkServicesClient --verbs get,createorupdate,delete,list --expand --ratelimitkey privateLinkServiceRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 --package-alias armnetwork --resource IPGroup --client-name IPGroupsClient --verbs get,createorupdate,delete,listbyrg --expand --ratelimitkey ipGroupRateLimit
+ $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns --package-alias armprivatedns --resource PrivateZone --client-name PrivateZonesClient --verbs get,createorupdate --ratelimitkey privateDNSRateLimit
+## $(TYPESCAFFOLD) --package github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage --package-alias armstorage --resource FileShare --client-name FileSharesClient --expand
+
+.PHONY: generateimpl
+generateimpl: build ## Generate client
+ $(CLIENTGEN) clientgen:headerFile=../../hack/boilerplate/boilerplate.gomock.txt paths=./...
+
+.PHONY: vet-all
+vet-all: golangci-lint ## Run golangci-lint against code.
+ $(LOCALBIN)/golangci-lint run --timeout 10m ./...
+
+
+ifndef ignore-not-found
+ ignore-not-found = false
+endif
+##@ Build Dependencies
+
+.PHONY: install-dependencies
+install-dependencies: golangci-lint goimports mockgen ginkgo ## Install all build dependencies.
+
+GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint
+.PHONY: golangci-lint
+golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
+$(GOLANGCI_LINT): $(LOCALBIN)
+ test -s $(LOCALBIN)/golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(LOCALBIN) latest
+
+GOIMPORTS ?= $(LOCALBIN)/goimports
+.PHONY: goimports
+goimports: $(GOIMPORTS) ## Download goimports locally if necessary.
+$(GOIMPORTS): $(LOCALBIN)
+ test -s $(LOCALBIN)/goimports || GOBIN=$(LOCALBIN) go install golang.org/x/tools/cmd/goimports@latest
+
+MOCKGEN ?= $(LOCALBIN)/mockgen
+.PHONY: mockgen
+mockgen: $(MOCKGEN) ## Download mockgen locally if necessary.
+$(MOCKGEN): $(LOCALBIN)
+ test -s $(LOCALBIN)/mockgen || GOBIN=$(LOCALBIN) go install github.com/golang/mock/mockgen@v1.6.0
+
+GINKGO ?= $(LOCALBIN)/ginkgo
+.PHONY: ginkgo
+ginkgo: $(GINKGO) ## Download ginkgo locally if necessary.
+$(GINKGO): $(LOCALBIN)
+ test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) go install github.com/onsi/ginkgo/v2/ginkgo@latest
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go
new file mode 100644
index 0000000000..55ddc722a1
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/arm_conf.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azclient
+
+import (
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type ARMClientConfig struct {
+ // The cloud environment identifier. Takes values from https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore@v1.6.0/cloud
+ Cloud string `json:"cloud,omitempty" yaml:"cloud,omitempty"`
+ // The user agent for Azure customer usage attribution
+ UserAgent string `json:"userAgent,omitempty" yaml:"userAgent,omitempty"`
+ // ResourceManagerEndpoint is the cloud's resource manager endpoint. If set, cloud provider queries this endpoint
+ // in order to generate an autorest.Environment instance instead of using one of the pre-defined Environments.
+ ResourceManagerEndpoint string `json:"resourceManagerEndpoint,omitempty" yaml:"resourceManagerEndpoint,omitempty"`
+}
+
+func NewClientOptionFromARMClientConfig(config *ARMClientConfig) (*policy.ClientOptions, error) {
+	// Get default settings
+	options := utils.GetDefaultOption()
+	if config != nil {
+		// update user agent header
+		options.ClientOptions.Telemetry.ApplicationID = strings.TrimSpace(config.UserAgent)
+		// set cloud; fail before dereferencing if the cloud config cannot be resolved
+		cloudConfig, err := GetAzureCloudConfig(config)
+		if err != nil {
+			return nil, err
+		}
+		options.ClientOptions.Cloud = *cloudConfig
+	} else {
+		options.ClientOptions.Cloud = cloud.AzurePublic
+	}
+	return options, nil
+}
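For reference, a minimal sketch (not part of the patch) of how an ARMClientConfig feeds into track2 SDK client options; the cloud name and user agent below are placeholder values:

package main

import (
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
)

func main() {
	cfg := &azclient.ARMClientConfig{
		Cloud:     "AzurePublicCloud",   // resolved via the environment mapping in cloud.go
		UserAgent: "example-user-agent", // becomes Telemetry.ApplicationID
	}
	opts, err := azclient.NewClientOptionFromARMClientConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(opts.ClientOptions.Cloud.ActiveDirectoryAuthorityHost)
}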
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go
new file mode 100644
index 0000000000..67f12af480
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/auth.go
@@ -0,0 +1,242 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azclient
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "golang.org/x/crypto/pkcs12"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// AzureAuthConfig holds auth related part of cloud config
+type AzureAuthConfig struct {
+ // The AAD Tenant ID for the Subscription that the cluster is deployed in
+ TenantID string `json:"tenantId,omitempty" yaml:"tenantId,omitempty"`
+ // The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
+ AADClientID string `json:"aadClientId,omitempty" yaml:"aadClientId,omitempty"`
+ // The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
+ AADClientSecret string `json:"aadClientSecret,omitempty" yaml:"aadClientSecret,omitempty" datapolicy:"token"`
+ // The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
+ AADClientCertPath string `json:"aadClientCertPath,omitempty" yaml:"aadClientCertPath,omitempty"`
+ // The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
+ AADClientCertPassword string `json:"aadClientCertPassword,omitempty" yaml:"aadClientCertPassword,omitempty" datapolicy:"password"`
+ // Use managed service identity for the virtual machine to access Azure ARM APIs
+ UseManagedIdentityExtension bool `json:"useManagedIdentityExtension,omitempty" yaml:"useManagedIdentityExtension,omitempty"`
+ // UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used.
+ // More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview
+ // For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true.
+ UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty" yaml:"userAssignedIdentityID,omitempty"`
+ // The AAD Tenant ID for the Subscription that the network resources are deployed in.
+ NetworkResourceTenantID string `json:"networkResourceTenantID,omitempty" yaml:"networkResourceTenantID,omitempty"`
+ // The AAD federated token file
+ AADFederatedTokenFile string `json:"aadFederatedTokenFile,omitempty" yaml:"aadFederatedTokenFile,omitempty"`
+ // Use workload identity federation for the virtual machine to access Azure ARM APIs
+ UseFederatedWorkloadIdentityExtension bool `json:"useFederatedWorkloadIdentityExtension,omitempty" yaml:"useFederatedWorkloadIdentityExtension,omitempty"`
+}
+
+var (
+ // ErrorNoAuth indicates that no credentials are provided.
+ ErrorNoAuth = fmt.Errorf("no credentials provided for Azure cloud provider")
+)
+
+type AuthProvider struct {
+ FederatedIdentityCredential azcore.TokenCredential
+ ManagedIdentityCredential azcore.TokenCredential
+ ClientSecretCredential azcore.TokenCredential
+ NetworkClientSecretCredential azcore.TokenCredential
+ MultiTenantCredential azcore.TokenCredential
+ ClientCertificateCredential azcore.TokenCredential
+}
+
+func GetDefaultAuthClientOption(armConfig *ARMClientConfig) (*policy.ClientOptions, error) {
+ //Get default settings
+ options, err := NewClientOptionFromARMClientConfig(armConfig)
+ if err != nil {
+ return nil, err
+ }
+ return options, nil
+}
+
+func NewAuthProvider(config AzureAuthConfig, clientOption *policy.ClientOptions) (*AuthProvider, error) {
+ if clientOption == nil {
+ clientOption = &policy.ClientOptions{}
+ }
+ // these environment variables are injected by workload identity webhook
+ if tenantID := os.Getenv(utils.AzureTenantID); tenantID != "" {
+ config.TenantID = tenantID
+ }
+ if clientID := os.Getenv(utils.AzureClientID); clientID != "" {
+ config.AADClientID = clientID
+ }
+ var err error
+ // federatedIdentityCredential is used for workload identity federation
+ var federatedIdentityCredential azcore.TokenCredential
+ if federatedTokenFile := os.Getenv(utils.AzureFederatedTokenFile); federatedTokenFile != "" {
+ config.AADFederatedTokenFile = federatedTokenFile
+ config.UseFederatedWorkloadIdentityExtension = true
+ }
+ if config.UseFederatedWorkloadIdentityExtension {
+ federatedIdentityCredential, err = azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{
+ ClientOptions: clientOption.ClientOptions,
+ ClientID: config.AADClientID,
+ TenantID: config.TenantID,
+ TokenFilePath: config.AADFederatedTokenFile,
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // managedIdentityCredential is used for managed identity extension
+ var managedIdentityCredential azcore.TokenCredential
+ if config.UseManagedIdentityExtension {
+ credOptions := &azidentity.ManagedIdentityCredentialOptions{
+ ClientOptions: clientOption.ClientOptions,
+ }
+ if len(config.UserAssignedIdentityID) > 0 {
+ if strings.Contains(strings.ToUpper(config.UserAssignedIdentityID), "/SUBSCRIPTIONS/") {
+ credOptions.ID = azidentity.ResourceID(config.UserAssignedIdentityID)
+ } else {
+ credOptions.ID = azidentity.ClientID(config.UserAssignedIdentityID)
+ }
+ }
+ managedIdentityCredential, err = azidentity.NewManagedIdentityCredential(credOptions)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // ClientSecretCredential is used for client secret
+ var clientSecretCredential azcore.TokenCredential
+ var networkClientSecretCredential azcore.TokenCredential
+ var multiTenantCredential azcore.TokenCredential
+ if len(config.AADClientSecret) > 0 {
+ credOptions := &azidentity.ClientSecretCredentialOptions{
+ ClientOptions: clientOption.ClientOptions,
+ }
+ clientSecretCredential, err = azidentity.NewClientSecretCredential(config.TenantID, config.AADClientID, config.AADClientSecret, credOptions)
+ if err != nil {
+ return nil, err
+ }
+ if len(config.NetworkResourceTenantID) > 0 && !strings.EqualFold(config.NetworkResourceTenantID, config.TenantID) {
+ credOptions := &azidentity.ClientSecretCredentialOptions{
+ ClientOptions: clientOption.ClientOptions,
+ }
+ networkClientSecretCredential, err = azidentity.NewClientSecretCredential(config.NetworkResourceTenantID, config.AADClientID, config.AADClientSecret, credOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ credOptions = &azidentity.ClientSecretCredentialOptions{
+ ClientOptions: clientOption.ClientOptions,
+ AdditionallyAllowedTenants: []string{config.NetworkResourceTenantID},
+ }
+ multiTenantCredential, err = azidentity.NewClientSecretCredential(config.TenantID, config.AADClientID, config.AADClientSecret, credOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ }
+
+ // ClientCertificateCredential is used for client certificate
+ var clientCertificateCredential azcore.TokenCredential
+ if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
+ credOptions := &azidentity.ClientCertificateCredentialOptions{
+ ClientOptions: clientOption.ClientOptions,
+ SendCertificateChain: true,
+ }
+ certData, err := os.ReadFile(config.AADClientCertPath)
+ if err != nil {
+ return nil, fmt.Errorf("reading the client certificate from file %s: %w", config.AADClientCertPath, err)
+ }
+ certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
+ if err != nil {
+ return nil, fmt.Errorf("decoding the client certificate: %w", err)
+ }
+ clientCertificateCredential, err = azidentity.NewClientCertificateCredential(config.TenantID, config.AADClientID, []*x509.Certificate{certificate}, privateKey, credOptions)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &AuthProvider{
+ FederatedIdentityCredential: federatedIdentityCredential,
+ ManagedIdentityCredential: managedIdentityCredential,
+ ClientSecretCredential: clientSecretCredential,
+ ClientCertificateCredential: clientCertificateCredential,
+ NetworkClientSecretCredential: networkClientSecretCredential,
+ MultiTenantCredential: multiTenantCredential,
+ }, nil
+}
+
+func (factory *AuthProvider) GetAzIdentity() (azcore.TokenCredential, error) {
+	switch {
+ case factory.FederatedIdentityCredential != nil:
+ return factory.FederatedIdentityCredential, nil
+ case factory.ManagedIdentityCredential != nil:
+ return factory.ManagedIdentityCredential, nil
+ case factory.ClientSecretCredential != nil:
+ return factory.ClientSecretCredential, nil
+ case factory.ClientCertificateCredential != nil:
+ return factory.ClientCertificateCredential, nil
+ default:
+ return nil, ErrorNoAuth
+ }
+}
+
+// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
+// the private RSA key
+func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+ privateKey, certificate, err := pkcs12.Decode(pkcs, password)
+ if err != nil {
+ return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %w", err)
+ }
+ rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
+ if !isRsaKey {
+		return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key")
+ }
+
+ return certificate, rsaPrivateKey, nil
+}
+
+func (factory *AuthProvider) GetNetworkAzIdentity() (azcore.TokenCredential, error) {
+ if factory.NetworkClientSecretCredential != nil {
+ return factory.NetworkClientSecretCredential, nil
+ }
+ return nil, ErrorNoAuth
+}
+
+func (factory *AuthProvider) GetMultiTenantIdentity() (azcore.TokenCredential, error) {
+ if factory.MultiTenantCredential != nil {
+ return factory.MultiTenantCredential, nil
+ }
+ return nil, ErrorNoAuth
+}
+
+func (factory *AuthProvider) IsMultiTenantModeEnabled() bool {
+ return factory.MultiTenantCredential != nil
+}
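As a rough usage sketch (not part of the patch), this is how a client-secret configuration might be turned into an AuthProvider and a credential; the tenant ID, client ID and secret are dummy placeholders:

package main

import (
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
)

func main() {
	authCfg := azclient.AzureAuthConfig{
		TenantID:        "00000000-0000-0000-0000-000000000000", // dummy tenant ID
		AADClientID:     "11111111-1111-1111-1111-111111111111", // dummy client ID
		AADClientSecret: "dummy-secret",                         // dummy secret
	}
	clientOpts, err := azclient.GetDefaultAuthClientOption(nil)
	if err != nil {
		panic(err)
	}
	provider, err := azclient.NewAuthProvider(authCfg, clientOpts)
	if err != nil {
		panic(err)
	}
	// GetAzIdentity prefers workload identity, then managed identity,
	// then client secret, then client certificate, in that order.
	cred, err := provider.GetAzIdentity()
	if err != nil {
		panic(err)
	}
	fmt.Printf("selected credential type: %T\n", cred)
}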
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/interface.go
new file mode 100644
index 0000000000..fe07613584
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/interface.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package availabilitysetclient
+
+import (
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;list,resource=AvailabilitySet,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=AvailabilitySetsClient,expand=false,rateLimitKey=availabilitySetRateLimit
+type Interface interface {
+ utils.GetFunc[armcompute.AvailabilitySet]
+ utils.ListFunc[armcompute.AvailabilitySet]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/zz_generated_client.go
new file mode 100644
index 0000000000..7fee64267f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient/zz_generated_client.go
@@ -0,0 +1,68 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package availabilitysetclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armcompute.AvailabilitySetsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armcompute.NewAvailabilitySetsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the AvailabilitySet
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.AvailabilitySet, rerr error) {
+
+ resp, err := client.AvailabilitySetsClient.Get(ctx, resourceGroupName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.AvailabilitySet, nil
+}
+
+// List gets a list of AvailabilitySet in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.AvailabilitySet, rerr error) {
+ pager := client.AvailabilitySetsClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go
new file mode 100644
index 0000000000..dd49602e80
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/cloud.go
@@ -0,0 +1,180 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azclient
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+)
+
+var EnvironmentMapping = map[string]*cloud.Configuration{
+ "AZURECHINACLOUD": &cloud.AzureChina,
+ "AZURECLOUD": &cloud.AzurePublic,
+ "AZUREPUBLICCLOUD": &cloud.AzurePublic,
+ "AZUREUSGOVERNMENT": &cloud.AzureGovernment,
+ "AZUREUSGOVERNMENTCLOUD": &cloud.AzureGovernment, //TODO: deprecate
+}
+
+const (
+ // EnvironmentFilepathName captures the name of the environment variable containing the path to the file
+ // to be used while populating the Azure Environment.
+ EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
+)
+
+func AzureCloudConfigFromName(cloudName string) *cloud.Configuration {
+ if cloudName == "" {
+ return &cloud.AzurePublic
+ }
+ cloudName = strings.ToUpper(strings.TrimSpace(cloudName))
+ if cloudConfig, ok := EnvironmentMapping[cloudName]; ok {
+ return cloudConfig
+ }
+ return nil
+}
+
+// AzureCloudConfigFromURL returns cloud config from url
+// track2 sdk will add this one in the near future https://github.com/Azure/azure-sdk-for-go/issues/20959
+func AzureCloudConfigFromURL(endpoint string) (*cloud.Configuration, error) {
+ managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=2019-05-01")
+	res, err := http.Get(managementEndpoint) //nolint
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	body, err := io.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ metadata := []struct {
+ Authentication struct {
+ Audiences []string
+ LoginEndpoint string
+ }
+ Name, ResourceManager string
+ }{}
+ err = json.Unmarshal(body, &metadata)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(metadata) > 0 {
+ return &cloud.Configuration{
+ ActiveDirectoryAuthorityHost: metadata[0].Authentication.LoginEndpoint,
+ Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
+ cloud.ResourceManager: {
+ Endpoint: metadata[0].ResourceManager,
+ Audience: metadata[0].Authentication.Audiences[0],
+ },
+ },
+ }, nil
+ }
+ return nil, nil
+}
+
+func AzureCloudConfigOverrideFromEnv(config *cloud.Configuration) (*cloud.Configuration, error) {
+ if config == nil {
+ config = &cloud.AzurePublic
+ }
+ envFilePath, ok := os.LookupEnv(EnvironmentFilepathName)
+ if !ok {
+ return config, nil
+ }
+ content, err := os.ReadFile(envFilePath)
+ if err != nil {
+ return nil, err
+ }
+ var envConfig Environment
+ if err = json.Unmarshal(content, &envConfig); err != nil {
+ return nil, err
+ }
+ if len(envConfig.ActiveDirectoryEndpoint) > 0 {
+ config.ActiveDirectoryAuthorityHost = envConfig.ActiveDirectoryEndpoint
+ }
+ if len(envConfig.ResourceManagerEndpoint) > 0 && len(envConfig.TokenAudience) > 0 {
+ config.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
+ Endpoint: envConfig.ResourceManagerEndpoint,
+ Audience: envConfig.TokenAudience,
+ }
+ }
+ return config, nil
+}
+
+func GetAzureCloudConfig(armConfig *ARMClientConfig) (*cloud.Configuration, error) {
+ var config *cloud.Configuration
+ var err error
+ if armConfig == nil {
+ config = &cloud.AzurePublic
+ } else {
+ config = AzureCloudConfigFromName(armConfig.Cloud)
+ if armConfig.ResourceManagerEndpoint != "" {
+ config, err = AzureCloudConfigFromURL(armConfig.ResourceManagerEndpoint)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return AzureCloudConfigOverrideFromEnv(config)
+}
+
+// Environment represents a set of endpoints for each of Azure's Clouds.
+type Environment struct {
+ Name string `json:"name"`
+ ManagementPortalURL string `json:"managementPortalURL"`
+ PublishSettingsURL string `json:"publishSettingsURL"`
+ ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
+ ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
+ ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
+ GalleryEndpoint string `json:"galleryEndpoint"`
+ KeyVaultEndpoint string `json:"keyVaultEndpoint"`
+ ManagedHSMEndpoint string `json:"managedHSMEndpoint"`
+ GraphEndpoint string `json:"graphEndpoint"`
+ ServiceBusEndpoint string `json:"serviceBusEndpoint"`
+ BatchManagementEndpoint string `json:"batchManagementEndpoint"`
+ MicrosoftGraphEndpoint string `json:"microsoftGraphEndpoint"`
+ StorageEndpointSuffix string `json:"storageEndpointSuffix"`
+ CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
+ MariaDBDNSSuffix string `json:"mariaDBDNSSuffix"`
+ MySQLDatabaseDNSSuffix string `json:"mySqlDatabaseDNSSuffix"`
+ PostgresqlDatabaseDNSSuffix string `json:"postgresqlDatabaseDNSSuffix"`
+ SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
+ TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
+ KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
+ ManagedHSMDNSSuffix string `json:"managedHSMDNSSuffix"`
+ ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
+ ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
+ ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
+ ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
+ TokenAudience string `json:"tokenAudience"`
+ APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"`
+ SynapseEndpointSuffix string `json:"synapseEndpointSuffix"`
+ DatalakeSuffix string `json:"datalakeSuffix"`
+ ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"`
+}
+
+// ResourceIdentifier contains a set of Azure resource IDs.
+type ResourceIdentifier struct {
+ Graph string `json:"graph"`
+ KeyVault string `json:"keyVault"`
+ Datalake string `json:"datalake"`
+ Batch string `json:"batch"`
+ OperationalInsights string `json:"operationalInsights"`
+}
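A minimal sketch (not part of the patch) of the cloud-resolution helpers above; the cloud names are illustrative and the second call also honours the AZURE_ENVIRONMENT_FILEPATH override:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
)

func main() {
	// Well-known cloud names map onto the SDK's built-in configurations;
	// unknown names return nil.
	if cfg := azclient.AzureCloudConfigFromName("AzureChinaCloud"); cfg != nil {
		fmt.Println(cfg.ActiveDirectoryAuthorityHost)
	}

	// Full resolution path used by NewClientOptionFromARMClientConfig.
	resolved, err := azclient.GetAzureCloudConfig(&azclient.ARMClientConfig{Cloud: "AzurePublicCloud"})
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.Services[cloud.ResourceManager].Endpoint)
}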
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/custom.go
new file mode 100644
index 0000000000..8dd22ddc26
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/custom.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deploymentclient
+
+import (
+ "context"
+
+ resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// List gets a list of Deployment in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*resources.DeploymentExtended, rerr error) {
+ pager := client.DeploymentsClient.NewListByResourceGroupPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
+
+// Get gets the Deployment
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *resources.DeploymentExtended, rerr error) {
+ var ops *resources.DeploymentsClientGetOptions
+
+ resp, err := client.DeploymentsClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.DeploymentExtended, nil
+}
+
+// CreateOrUpdate creates or updates a Deployment.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource resources.Deployment) (*resources.DeploymentExtended, error) {
+ resp, err := utils.NewPollerWrapper(client.DeploymentsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &resp.DeploymentExtended, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/interface.go
new file mode 100644
index 0000000000..9a223dc6ce
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/interface.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package deploymentclient
+
+import (
+ "context"
+
+ resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=delete,resource=Deployment,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources,packageAlias=resources,clientName=DeploymentsClient,expand=false,rateLimitKey=deploymentRateLimit
+type Interface interface {
+ Get(ctx context.Context, resourceGroupName string, resourceName string) (result *resources.DeploymentExtended, rerr error)
+ List(ctx context.Context, resourceGroupName string) (result []*resources.DeploymentExtended, rerr error)
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resourceParam resources.Deployment) (*resources.DeploymentExtended, error)
+ utils.DeleteFunc[resources.Deployment]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/zz_generated_client.go
new file mode 100644
index 0000000000..e1fc7cd0d9
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient/zz_generated_client.go
@@ -0,0 +1,50 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package deploymentclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *resources.DeploymentsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := resources.NewDeploymentsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Delete deletes a Deployment by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go
new file mode 100644
index 0000000000..41d215c8f2
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package diskclient
+
+import (
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;listbyrg,resource=Disk,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=DisksClient,expand=false,rateLimitKey=diskRateLimit
+type Interface interface {
+ utils.GetFunc[armcompute.Disk]
+ utils.CreateOrUpdateFunc[armcompute.Disk]
+ utils.DeleteFunc[armcompute.Disk]
+ utils.ListFunc[armcompute.Disk]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/zz_generated_client.go
new file mode 100644
index 0000000000..a2c0a3d3b3
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient/zz_generated_client.go
@@ -0,0 +1,86 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package diskclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armcompute.DisksClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armcompute.NewDisksClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the Disk
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.Disk, rerr error) {
+
+ resp, err := client.DisksClient.Get(ctx, resourceGroupName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.Disk, nil
+}
+
+// CreateOrUpdate creates or updates a Disk.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.Disk) (*armcompute.Disk, error) {
+ resp, err := utils.NewPollerWrapper(client.DisksClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.Disk, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a Disk by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of Disk in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.Disk, rerr error) {
+ pager := client.DisksClient.NewListByResourceGroupPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
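For reference, a minimal sketch (not part of the patch) of calling the generated disk client directly; the subscription ID, resource group and disk name are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// Passing nil options falls back to utils.GetDefaultOption().
	client, err := diskclient.New("00000000-0000-0000-0000-000000000000", cred, nil)
	if err != nil {
		panic(err)
	}
	disk, err := client.Get(context.Background(), "example-rg", "example-disk")
	if err != nil {
		panic(err)
	}
	fmt.Println(*disk.ID)
}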
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go
new file mode 100644
index 0000000000..9f6f06df1f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory.go
@@ -0,0 +1,68 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package azclient
+
+import (
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient"
+)
+
+type ClientFactory interface {
+ GetAvailabilitySetClient() availabilitysetclient.Interface
+ GetDeploymentClient() deploymentclient.Interface
+ GetDiskClient() diskclient.Interface
+ GetInterfaceClient() interfaceclient.Interface
+ GetIPGroupClient() ipgroupclient.Interface
+ GetLoadBalancerClient() loadbalancerclient.Interface
+ GetManagedClusterClient() managedclusterclient.Interface
+ GetPrivateEndpointClient() privateendpointclient.Interface
+ GetPrivateLinkServiceClient() privatelinkserviceclient.Interface
+ GetPrivateZoneClient() privatezoneclient.Interface
+ GetPublicIPAddressClient() publicipaddressclient.Interface
+ GetPublicIPPrefixClient() publicipprefixclient.Interface
+ GetResourceGroupClient() resourcegroupclient.Interface
+ GetRouteTableClient() routetableclient.Interface
+ GetSecurityGroupClient() securitygroupclient.Interface
+ GetSnapshotClient() snapshotclient.Interface
+ GetSSHPublicKeyResourceClient() sshpublickeyresourceclient.Interface
+ GetSubnetClient() subnetclient.Interface
+ GetVirtualMachineClient() virtualmachineclient.Interface
+ GetVirtualMachineScaleSetClient() virtualmachinescalesetclient.Interface
+ GetVirtualMachineScaleSetVMClient() virtualmachinescalesetvmclient.Interface
+ GetVirtualNetworkClient() virtualnetworkclient.Interface
+}
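A short sketch (not part of the patch) of obtaining typed clients through the factory; the subscription ID and resource group are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	factory, err := azclient.NewClientFactory(
		&azclient.ClientFactoryConfig{SubscriptionID: "00000000-0000-0000-0000-000000000000"},
		nil, // nil ARM client config defaults to the public cloud
		cred,
	)
	if err != nil {
		panic(err)
	}
	disks, err := factory.GetDiskClient().List(context.Background(), "example-rg")
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d disks\n", len(disks))
}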
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_conf.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_conf.go
new file mode 100644
index 0000000000..4b12a3cfcf
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_conf.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azclient
+
+import (
+ "context"
+ "net/http"
+ "sync"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer"
+)
+
+var DefaultResourceClientTransport *http.Client
+var once sync.Once
+
+func init() {
+ once.Do(func() {
+ DefaultResourceClientTransport = &http.Client{
+ Transport: armbalancer.New(context.Background(), armbalancer.Options{
+ Transport: utils.DefaultTransport,
+ PoolSize: 100,
+ }),
+ }
+ })
+}
+
+type ClientFactoryConfig struct {
+ ratelimit.CloudProviderRateLimitConfig
+
+ // Enable exponential backoff to manage resource request retries
+ CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty" yaml:"cloudProviderBackoff,omitempty"`
+
+ // The ID of the Azure Subscription that the cluster is deployed in
+ SubscriptionID string `json:"subscriptionId,omitempty" yaml:"subscriptionId,omitempty"`
+}
+
+func GetDefaultResourceClientOption(armConfig *ARMClientConfig, factoryConfig *ClientFactoryConfig) (*policy.ClientOptions, error) {
+ //Get default settings
+ options, err := NewClientOptionFromARMClientConfig(armConfig)
+ if err != nil {
+ return nil, err
+ }
+ if factoryConfig != nil {
+ //Set retry
+ if !factoryConfig.CloudProviderBackoff {
+ options.Retry.MaxRetries = 0
+ }
+ }
+ options.Transport = DefaultResourceClientTransport
+ return options, err
+}
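A minimal sketch (not part of the patch) of how CloudProviderBackoff affects the generated client options; the subscription ID is a placeholder:

package main

import (
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
)

func main() {
	factoryCfg := &azclient.ClientFactoryConfig{
		CloudProviderBackoff: false, // retries are disabled when backoff is off
		SubscriptionID:       "00000000-0000-0000-0000-000000000000",
	}
	opts, err := azclient.GetDefaultResourceClientOption(nil, factoryCfg)
	if err != nil {
		panic(err)
	}
	// With backoff disabled, MaxRetries is forced to 0; the shared
	// DefaultResourceClientTransport (an armbalancer pool) is always attached.
	fmt.Println(opts.Retry.MaxRetries)
}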
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go
new file mode 100644
index 0000000000..0eb25bad61
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/factory_gen.go
@@ -0,0 +1,532 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package azclient
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient"
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient"
+)
+
+type ClientFactoryImpl struct {
+ *ClientFactoryConfig
+ cred azcore.TokenCredential
+ availabilitysetclientInterface availabilitysetclient.Interface
+ deploymentclientInterface deploymentclient.Interface
+ diskclientInterface diskclient.Interface
+ interfaceclientInterface interfaceclient.Interface
+ ipgroupclientInterface ipgroupclient.Interface
+ loadbalancerclientInterface loadbalancerclient.Interface
+ managedclusterclientInterface managedclusterclient.Interface
+ privateendpointclientInterface privateendpointclient.Interface
+ privatelinkserviceclientInterface privatelinkserviceclient.Interface
+ privatezoneclientInterface privatezoneclient.Interface
+ publicipaddressclientInterface publicipaddressclient.Interface
+ publicipprefixclientInterface publicipprefixclient.Interface
+ resourcegroupclientInterface resourcegroupclient.Interface
+ routetableclientInterface routetableclient.Interface
+ securitygroupclientInterface securitygroupclient.Interface
+ snapshotclientInterface snapshotclient.Interface
+ sshpublickeyresourceclientInterface sshpublickeyresourceclient.Interface
+ subnetclientInterface subnetclient.Interface
+ virtualmachineclientInterface virtualmachineclient.Interface
+ virtualmachinescalesetclientInterface virtualmachinescalesetclient.Interface
+ virtualmachinescalesetvmclientInterface virtualmachinescalesetvmclient.Interface
+ virtualnetworkclientInterface virtualnetworkclient.Interface
+}
+
+func NewClientFactory(config *ClientFactoryConfig, armConfig *ARMClientConfig, cred azcore.TokenCredential) (ClientFactory, error) {
+ if config == nil {
+ config = &ClientFactoryConfig{}
+ }
+ if cred == nil {
+ cred = &azidentity.DefaultAzureCredential{}
+ }
+
+ var options *arm.ClientOptions
+ var err error
+
+ //initialize {availabilitysetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/availabilitysetclient AvailabilitySet Interface availabilitySetRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+
+ var ratelimitOption *ratelimit.Config
+ var rateLimitPolicy policy.Policy
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("availabilitySetRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ availabilitysetclientInterface, err := availabilitysetclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {deploymentclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/deploymentclient Deployment Interface deploymentRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("deploymentRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ deploymentclientInterface, err := deploymentclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {diskclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/diskclient Disk Interface diskRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("diskRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ diskclientInterface, err := diskclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {interfaceclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient Interface Interface interfaceRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("interfaceRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ interfaceclientInterface, err := interfaceclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {ipgroupclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient IPGroup Interface ipGroupRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("ipGroupRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ ipgroupclientInterface, err := ipgroupclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {loadbalancerclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient LoadBalancer Interface loadBalancerRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("loadBalancerRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ loadbalancerclientInterface, err := loadbalancerclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {managedclusterclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient ManagedCluster Interface containerServiceRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("containerServiceRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ managedclusterclientInterface, err := managedclusterclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {privateendpointclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient PrivateEndpoint Interface privateEndpointRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("privateEndpointRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ privateendpointclientInterface, err := privateendpointclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {privatelinkserviceclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient PrivateLinkService Interface privateLinkServiceRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("privateLinkServiceRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ privatelinkserviceclientInterface, err := privatelinkserviceclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {privatezoneclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient PrivateZone Interface privateDNSRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("privateDNSRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ privatezoneclientInterface, err := privatezoneclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {publicipaddressclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient PublicIPAddress Interface publicIPAddressRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("publicIPAddressRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ publicipaddressclientInterface, err := publicipaddressclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {publicipprefixclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient PublicIPPrefix Interface }
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+
+ publicipprefixclientInterface, err := publicipprefixclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {resourcegroupclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient ResourceGroup Interface }
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+
+ resourcegroupclientInterface, err := resourcegroupclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {routetableclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient RouteTable Interface routeTableRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("routeTableRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ routetableclientInterface, err := routetableclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {securitygroupclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient SecurityGroup Interface securityGroupRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("securityGroupRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ securitygroupclientInterface, err := securitygroupclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {snapshotclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient Snapshot Interface snapshotRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("snapshotRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ snapshotclientInterface, err := snapshotclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {sshpublickeyresourceclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient SSHPublicKeyResource Interface }
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+
+ sshpublickeyresourceclientInterface, err := sshpublickeyresourceclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {subnetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient VirtualNetwork Subnet Interface subnetsRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("subnetsRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ subnetclientInterface, err := subnetclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {virtualmachineclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient VirtualMachine Interface virtualMachineRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("virtualMachineRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ virtualmachineclientInterface, err := virtualmachineclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {virtualmachinescalesetclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient VirtualMachineScaleSet Interface virtualMachineSizesRateLimit}
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+ //add ratelimit policy
+ ratelimitOption = config.GetRateLimitConfig("virtualMachineSizesRateLimit")
+ rateLimitPolicy = ratelimit.NewRateLimitPolicy(ratelimitOption)
+ if rateLimitPolicy != nil {
+ options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, rateLimitPolicy)
+ }
+ virtualmachinescalesetclientInterface, err := virtualmachinescalesetclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {virtualmachinescalesetvmclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient VirtualMachineScaleSet VirtualMachineScaleSetVM Interface }
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+
+ virtualmachinescalesetvmclientInterface, err := virtualmachinescalesetvmclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ //initialize {virtualnetworkclient sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient VirtualNetwork Interface }
+ options, err = GetDefaultResourceClientOption(armConfig, config)
+ if err != nil {
+ return nil, err
+ }
+
+ virtualnetworkclientInterface, err := virtualnetworkclient.New(config.SubscriptionID, cred, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ClientFactoryImpl{
+ ClientFactoryConfig: config,
+		cred:                                    cred,
+		availabilitysetclientInterface:          availabilitysetclientInterface,
+ deploymentclientInterface: deploymentclientInterface,
+ diskclientInterface: diskclientInterface,
+ interfaceclientInterface: interfaceclientInterface,
+ ipgroupclientInterface: ipgroupclientInterface,
+ loadbalancerclientInterface: loadbalancerclientInterface,
+ managedclusterclientInterface: managedclusterclientInterface,
+ privateendpointclientInterface: privateendpointclientInterface,
+ privatelinkserviceclientInterface: privatelinkserviceclientInterface,
+ privatezoneclientInterface: privatezoneclientInterface,
+ publicipaddressclientInterface: publicipaddressclientInterface,
+ publicipprefixclientInterface: publicipprefixclientInterface,
+ resourcegroupclientInterface: resourcegroupclientInterface,
+ routetableclientInterface: routetableclientInterface,
+ securitygroupclientInterface: securitygroupclientInterface,
+ snapshotclientInterface: snapshotclientInterface,
+ sshpublickeyresourceclientInterface: sshpublickeyresourceclientInterface,
+ subnetclientInterface: subnetclientInterface,
+ virtualmachineclientInterface: virtualmachineclientInterface,
+ virtualmachinescalesetclientInterface: virtualmachinescalesetclientInterface,
+ virtualmachinescalesetvmclientInterface: virtualmachinescalesetvmclientInterface,
+ virtualnetworkclientInterface: virtualnetworkclientInterface,
+ }, nil
+}
+
+func (factory *ClientFactoryImpl) GetAvailabilitySetClient() availabilitysetclient.Interface {
+ return factory.availabilitysetclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetDeploymentClient() deploymentclient.Interface {
+ return factory.deploymentclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetDiskClient() diskclient.Interface {
+ return factory.diskclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetInterfaceClient() interfaceclient.Interface {
+ return factory.interfaceclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetIPGroupClient() ipgroupclient.Interface {
+ return factory.ipgroupclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetLoadBalancerClient() loadbalancerclient.Interface {
+ return factory.loadbalancerclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetManagedClusterClient() managedclusterclient.Interface {
+ return factory.managedclusterclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetPrivateEndpointClient() privateendpointclient.Interface {
+ return factory.privateendpointclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetPrivateLinkServiceClient() privatelinkserviceclient.Interface {
+ return factory.privatelinkserviceclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetPrivateZoneClient() privatezoneclient.Interface {
+ return factory.privatezoneclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetPublicIPAddressClient() publicipaddressclient.Interface {
+ return factory.publicipaddressclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetPublicIPPrefixClient() publicipprefixclient.Interface {
+ return factory.publicipprefixclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetResourceGroupClient() resourcegroupclient.Interface {
+ return factory.resourcegroupclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetRouteTableClient() routetableclient.Interface {
+ return factory.routetableclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetSecurityGroupClient() securitygroupclient.Interface {
+ return factory.securitygroupclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetSnapshotClient() snapshotclient.Interface {
+ return factory.snapshotclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetSSHPublicKeyResourceClient() sshpublickeyresourceclient.Interface {
+ return factory.sshpublickeyresourceclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetSubnetClient() subnetclient.Interface {
+ return factory.subnetclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetVirtualMachineClient() virtualmachineclient.Interface {
+ return factory.virtualmachineclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetVirtualMachineScaleSetClient() virtualmachinescalesetclient.Interface {
+ return factory.virtualmachinescalesetclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetVirtualMachineScaleSetVMClient() virtualmachinescalesetvmclient.Interface {
+ return factory.virtualmachinescalesetvmclientInterface
+}
+
+func (factory *ClientFactoryImpl) GetVirtualNetworkClient() virtualnetworkclient.Interface {
+ return factory.virtualnetworkclientInterface
+}
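
The getters above are the intended entry points for the track2 clients: a caller builds one factory and pulls typed clients from it. The following is a minimal consumption sketch, not taken from this patch; the constructor name `azclient.NewClientFactory`, its parameters, and the `ClientFactoryConfig` literal are assumptions, while `GetLoadBalancerClient().Get` follows the generated signature shown later in this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Hypothetical constructor: the patch only shows the tail of the factory
	// builder, so the exported name and argument order are assumed here.
	factory, err := azclient.NewClientFactory(
		&azclient.ClientFactoryConfig{SubscriptionID: "<subscription-id>"},
		nil, // ARM config: use defaults
		cred,
	)
	if err != nil {
		log.Fatal(err)
	}
	// Generated signature: Get(ctx, resourceGroupName, resourceName, expand *string).
	lb, err := factory.GetLoadBalancerClient().Get(context.Background(), "my-rg", "my-lb", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*lb.Name)
}
```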
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/interface.go
new file mode 100644
index 0000000000..cd169cbf99
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/interface.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package interfaceclient
+
+import (
+ "context"
+
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=Interface,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=InterfacesClient,expand=true,rateLimitKey=interfaceRateLimit
+type Interface interface {
+	// GetVirtualMachineScaleSetNetworkInterface gets the specified network interface of a VMSS VM.
+ GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, options *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse, error)
+ utils.GetWithExpandFunc[armnetwork.Interface]
+ utils.CreateOrUpdateFunc[armnetwork.Interface]
+ utils.DeleteFunc[armnetwork.Interface]
+ utils.ListFunc[armnetwork.Interface]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/zz_generated_client.go
new file mode 100644
index 0000000000..38cacdf2f9
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient/zz_generated_client.go
@@ -0,0 +1,89 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package interfaceclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.InterfacesClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewInterfacesClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the Interface
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.Interface, rerr error) {
+ var ops *armnetwork.InterfacesClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.InterfacesClientGetOptions{Expand: expand}
+ }
+ resp, err := client.InterfacesClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.Interface, nil
+}
+
+// CreateOrUpdate creates or updates an Interface.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.Interface) (*armnetwork.Interface, error) {
+ resp, err := utils.NewPollerWrapper(client.InterfacesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.Interface, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes an Interface by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of Interfaces in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.Interface, rerr error) {
+ pager := client.InterfacesClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
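
Because these wrappers are plain constructors over the track2 SDK clients, they can also be used standalone, for example from the e2e helpers. A minimal sketch that relies only on the signatures defined in this file (the subscription ID and resource group are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/interfaceclient"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Passing nil options falls back to utils.GetDefaultOption() inside New.
	nicClient, err := interfaceclient.New("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// List drains the pager and returns every NIC in the resource group.
	nics, err := nicClient.List(context.Background(), "my-rg")
	if err != nil {
		log.Fatal(err)
	}
	for _, nic := range nics {
		fmt.Println(*nic.Name)
	}
}
```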
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/interface.go
new file mode 100644
index 0000000000..1e63337912
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package ipgroupclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;listbyrg,resource=IPGroup,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=IPGroupsClient,expand=true,rateLimitKey=ipGroupRateLimit
+type Interface interface {
+ utils.GetWithExpandFunc[armnetwork.IPGroup]
+ utils.CreateOrUpdateFunc[armnetwork.IPGroup]
+ utils.DeleteFunc[armnetwork.IPGroup]
+ utils.ListFunc[armnetwork.IPGroup]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/zz_generated_client.go
new file mode 100644
index 0000000000..4eba499089
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/ipgroupclient/zz_generated_client.go
@@ -0,0 +1,89 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package ipgroupclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.IPGroupsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewIPGroupsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the IPGroup
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.IPGroup, rerr error) {
+ var ops *armnetwork.IPGroupsClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.IPGroupsClientGetOptions{Expand: expand}
+ }
+ resp, err := client.IPGroupsClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.IPGroup, nil
+}
+
+// CreateOrUpdate creates or updates an IPGroup.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.IPGroup) (*armnetwork.IPGroup, error) {
+ resp, err := utils.NewPollerWrapper(client.IPGroupsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.IPGroup, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes an IPGroup by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of IPGroups in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.IPGroup, rerr error) {
+ pager := client.IPGroupsClient.NewListByResourceGroupPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/interface.go
new file mode 100644
index 0000000000..232a04c3ec
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/interface.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package loadbalancerclient
+
+import (
+ "context"
+
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=LoadBalancer,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=LoadBalancersClient,expand=true,rateLimitKey=loadBalancerRateLimit
+type Interface interface {
+ utils.GetWithExpandFunc[armnetwork.LoadBalancer]
+ utils.CreateOrUpdateFunc[armnetwork.LoadBalancer]
+ utils.DeleteFunc[armnetwork.LoadBalancer]
+ utils.ListFunc[armnetwork.LoadBalancer]
+ MigrateToIPBased(ctx context.Context, groupName string, loadBalancerName string, options *armnetwork.LoadBalancersClientMigrateToIPBasedOptions) (armnetwork.LoadBalancersClientMigrateToIPBasedResponse, error)
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/zz_generated_client.go
new file mode 100644
index 0000000000..2187dbb992
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient/zz_generated_client.go
@@ -0,0 +1,89 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package loadbalancerclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.LoadBalancersClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewLoadBalancersClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the LoadBalancer
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.LoadBalancer, rerr error) {
+ var ops *armnetwork.LoadBalancersClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.LoadBalancersClientGetOptions{Expand: expand}
+ }
+ resp, err := client.LoadBalancersClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.LoadBalancer, nil
+}
+
+// CreateOrUpdate creates or updates a LoadBalancer.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.LoadBalancer) (*armnetwork.LoadBalancer, error) {
+ resp, err := utils.NewPollerWrapper(client.LoadBalancersClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.LoadBalancer, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a LoadBalancer by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of LoadBalancers in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.LoadBalancer, rerr error) {
+ pager := client.LoadBalancersClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/interface.go
new file mode 100644
index 0000000000..5a030aa22c
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package managedclusterclient
+
+import (
+ armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;listbyrg,resource=ManagedCluster,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4,packageAlias=armcontainerservice,clientName=ManagedClustersClient,expand=false,rateLimitKey=containerServiceRateLimit
+type Interface interface {
+ utils.GetFunc[armcontainerservice.ManagedCluster]
+ utils.CreateOrUpdateFunc[armcontainerservice.ManagedCluster]
+ utils.DeleteFunc[armcontainerservice.ManagedCluster]
+ utils.ListFunc[armcontainerservice.ManagedCluster]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/zz_generated_client.go
new file mode 100644
index 0000000000..16db2887e3
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/managedclusterclient/zz_generated_client.go
@@ -0,0 +1,86 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package managedclusterclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armcontainerservice.ManagedClustersClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armcontainerservice.NewManagedClustersClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the ManagedCluster
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcontainerservice.ManagedCluster, rerr error) {
+
+ resp, err := client.ManagedClustersClient.Get(ctx, resourceGroupName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.ManagedCluster, nil
+}
+
+// CreateOrUpdate creates or updates a ManagedCluster.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcontainerservice.ManagedCluster) (*armcontainerservice.ManagedCluster, error) {
+ resp, err := utils.NewPollerWrapper(client.ManagedClustersClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.ManagedCluster, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a ManagedCluster by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of ManagedClusters in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcontainerservice.ManagedCluster, rerr error) {
+ pager := client.ManagedClustersClient.NewListByResourceGroupPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol/docs.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol/docs.go
new file mode 100644
index 0000000000..ee96eb6f6b
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol/docs.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package flowcontrol is imported from https://github.com/kubernetes/client-go/tree/master/util/flowcontrol
+package flowcontrol
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol/throttle.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol/throttle.go
new file mode 100644
index 0000000000..b5989aeaee
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol/throttle.go
@@ -0,0 +1,192 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "golang.org/x/time/rate"
+ "k8s.io/utils/clock"
+)
+
+type PassiveRateLimiter interface {
+ // TryAccept returns true if a token is taken immediately. Otherwise,
+ // it returns false.
+ TryAccept() bool
+	// Stop stops the rate limiter; subsequent calls to TryAccept will return false.
+ Stop()
+ // QPS returns QPS of this rate limiter
+ QPS() float32
+}
+
+type RateLimiter interface {
+ PassiveRateLimiter
+ // Accept returns once a token becomes available.
+ Accept()
+ // Wait returns nil if a token is taken before the Context is done.
+ Wait(ctx context.Context) error
+}
+
+type tokenBucketPassiveRateLimiter struct {
+ limiter *rate.Limiter
+ qps float32
+ clock clock.PassiveClock
+}
+
+type tokenBucketRateLimiter struct {
+ tokenBucketPassiveRateLimiter
+ clock Clock
+}
+
+// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
+// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
+// smoothed qps rate of 'qps'.
+// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
+// The maximum number of tokens in the bucket is capped at 'burst'.
+func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
+ limiter := rate.NewLimiter(rate.Limit(qps), burst)
+ return newTokenBucketRateLimiterWithClock(limiter, clock.RealClock{}, qps)
+}
+
+// NewTokenBucketPassiveRateLimiter is similar to NewTokenBucketRateLimiter except that it returns
+// a PassiveRateLimiter which does not have Accept() and Wait() methods.
+func NewTokenBucketPassiveRateLimiter(qps float32, burst int) PassiveRateLimiter {
+ limiter := rate.NewLimiter(rate.Limit(qps), burst)
+ return newTokenBucketRateLimiterWithPassiveClock(limiter, clock.RealClock{}, qps)
+}
+
+// An injectable, mockable clock interface.
+type Clock interface {
+ clock.PassiveClock
+ Sleep(time.Duration)
+}
+
+var _ Clock = (*clock.RealClock)(nil)
+
+// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
+// but allows an injectable clock, for testing.
+func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
+ limiter := rate.NewLimiter(rate.Limit(qps), burst)
+ return newTokenBucketRateLimiterWithClock(limiter, c, qps)
+}
+
+// NewTokenBucketPassiveRateLimiterWithClock is similar to NewTokenBucketRateLimiterWithClock
+// except that it returns a PassiveRateLimiter which does not have Accept() and Wait() methods
+// and uses a PassiveClock.
+func NewTokenBucketPassiveRateLimiterWithClock(qps float32, burst int, c clock.PassiveClock) PassiveRateLimiter {
+ limiter := rate.NewLimiter(rate.Limit(qps), burst)
+ return newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps)
+}
+
+func newTokenBucketRateLimiterWithClock(limiter *rate.Limiter, c Clock, qps float32) *tokenBucketRateLimiter {
+ return &tokenBucketRateLimiter{
+ tokenBucketPassiveRateLimiter: *newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps),
+ clock: c,
+ }
+}
+
+func newTokenBucketRateLimiterWithPassiveClock(limiter *rate.Limiter, c clock.PassiveClock, qps float32) *tokenBucketPassiveRateLimiter {
+ return &tokenBucketPassiveRateLimiter{
+ limiter: limiter,
+ qps: qps,
+ clock: c,
+ }
+}
+
+func (tbprl *tokenBucketPassiveRateLimiter) Stop() {
+}
+
+func (tbprl *tokenBucketPassiveRateLimiter) QPS() float32 {
+ return tbprl.qps
+}
+
+func (tbprl *tokenBucketPassiveRateLimiter) TryAccept() bool {
+ return tbprl.limiter.AllowN(tbprl.clock.Now(), 1)
+}
+
+// Accept will block until a token becomes available
+func (tbrl *tokenBucketRateLimiter) Accept() {
+ now := tbrl.clock.Now()
+ tbrl.clock.Sleep(tbrl.limiter.ReserveN(now, 1).DelayFrom(now))
+}
+
+func (tbrl *tokenBucketRateLimiter) Wait(ctx context.Context) error {
+ return tbrl.limiter.Wait(ctx)
+}
+
+type fakeAlwaysRateLimiter struct{}
+
+func NewFakeAlwaysRateLimiter() RateLimiter {
+ return &fakeAlwaysRateLimiter{}
+}
+
+func (t *fakeAlwaysRateLimiter) TryAccept() bool {
+ return true
+}
+
+func (t *fakeAlwaysRateLimiter) Stop() {}
+
+func (t *fakeAlwaysRateLimiter) Accept() {}
+
+func (t *fakeAlwaysRateLimiter) QPS() float32 {
+ return 1
+}
+
+func (t *fakeAlwaysRateLimiter) Wait(_ context.Context) error {
+ return nil
+}
+
+type fakeNeverRateLimiter struct {
+ wg sync.WaitGroup
+}
+
+func NewFakeNeverRateLimiter() RateLimiter {
+ rl := fakeNeverRateLimiter{}
+ rl.wg.Add(1)
+ return &rl
+}
+
+func (t *fakeNeverRateLimiter) TryAccept() bool {
+ return false
+}
+
+func (t *fakeNeverRateLimiter) Stop() {
+ t.wg.Done()
+}
+
+func (t *fakeNeverRateLimiter) Accept() {
+ t.wg.Wait()
+}
+
+func (t *fakeNeverRateLimiter) QPS() float32 {
+ return 1
+}
+
+func (t *fakeNeverRateLimiter) Wait(_ context.Context) error {
+	return errors.New("cannot accept")
+}
+
+var (
+ _ RateLimiter = (*tokenBucketRateLimiter)(nil)
+ _ RateLimiter = (*fakeAlwaysRateLimiter)(nil)
+ _ RateLimiter = (*fakeNeverRateLimiter)(nil)
+)
+
+var _ PassiveRateLimiter = (*tokenBucketPassiveRateLimiter)(nil)
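
The token bucket starts full (with `burst` tokens) and refills at `qps`; `TryAccept` never blocks, while `Accept` sleeps until a token is reservable. A small sketch of the non-blocking path, using only functions defined in this file:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol"
)

func main() {
	// 2 QPS steady state, bursts of up to 5 requests.
	rl := flowcontrol.NewTokenBucketRateLimiter(2, 5)
	defer rl.Stop()

	for i := 0; i < 7; i++ {
		// The first 5 calls drain the initial burst; the remaining calls are
		// rejected until the bucket refills at 2 tokens per second.
		fmt.Printf("request %d accepted: %v\n", i, rl.TryAccept())
	}
}
```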
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/ratelimit.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/ratelimit.go
new file mode 100644
index 0000000000..0ed37f258f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/ratelimit.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ratelimit
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit/flowcontrol"
+)
+
+// Config indicates the rate limit config options.
+type Config struct {
+ // Enable rate limiting
+ CloudProviderRateLimit bool `json:"cloudProviderRateLimit,omitempty" yaml:"cloudProviderRateLimit,omitempty"`
+ // Rate limit QPS (Read)
+ CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS,omitempty" yaml:"cloudProviderRateLimitQPS,omitempty"`
+ // Rate limit Bucket Size
+ CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket,omitempty" yaml:"cloudProviderRateLimitBucket,omitempty"`
+ // Rate limit QPS (Write)
+ CloudProviderRateLimitQPSWrite float32 `json:"cloudProviderRateLimitQPSWrite,omitempty" yaml:"cloudProviderRateLimitQPSWrite,omitempty"`
+ // Rate limit Bucket Size
+ CloudProviderRateLimitBucketWrite int `json:"cloudProviderRateLimitBucketWrite,omitempty" yaml:"cloudProviderRateLimitBucketWrite,omitempty"`
+}
+
+func NewRateLimitPolicy(config *Config) policy.Policy {
+ if config != nil && config.CloudProviderRateLimit {
+ readLimiter := flowcontrol.NewTokenBucketRateLimiter(
+ config.CloudProviderRateLimitQPS,
+ config.CloudProviderRateLimitBucket)
+
+ writeLimiter := flowcontrol.NewTokenBucketRateLimiter(
+ config.CloudProviderRateLimitQPSWrite,
+ config.CloudProviderRateLimitBucketWrite)
+ return &Policy{
+ rateLimiterReader: readLimiter,
+ rateLimiterWriter: writeLimiter,
+ }
+ }
+ return nil
+}
+
+type Policy struct {
+ rateLimiterWriter flowcontrol.RateLimiter
+ rateLimiterReader flowcontrol.RateLimiter
+}
+
+func (f Policy) Do(req *policy.Request) (*http.Response, error) {
+ if req.Raw().Method == http.MethodGet || req.Raw().Method == http.MethodHead {
+ if !f.rateLimiterReader.TryAccept() {
+ return nil, errors.New("rate limit reached")
+ }
+ } else {
+ if !f.rateLimiterWriter.TryAccept() {
+ return nil, errors.New("rate limit reached")
+ }
+ }
+ return req.Next()
+}
+
+// CloudProviderRateLimitConfig indicates the rate limit config for each client.
+type CloudProviderRateLimitConfig struct {
+ // The default rate limit config options.
+ Config
+
+	// Rate limit config for each client. Values here override the default settings above.
+ Entries map[string]*Config `json:",inline" yaml:",inline"`
+}
+
+func NewCloudProviderRateLimitConfig() *CloudProviderRateLimitConfig {
+ return &CloudProviderRateLimitConfig{
+ Config: Config{
+ CloudProviderRateLimit: false,
+ },
+ Entries: make(map[string]*Config),
+ }
+}
+
+// GetRateLimitConfig returns the rate limit config for the given client. If the client is not found, the default is returned.
+func (config *CloudProviderRateLimitConfig) GetRateLimitConfig(clientName string) *Config {
+ if entry, ok := config.Entries[clientName]; ok {
+ return entry
+ }
+ return &config.Config
+}
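
This is the same pattern the generated factory follows: look up the per-client entry (falling back to the default), build the policy, and append it to `PerCallPolicies` only when it is non-nil. A standalone sketch of that wiring for the load balancer client; the QPS and bucket numbers are arbitrary placeholders:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/loadbalancerclient"
	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/ratelimit"
)

func main() {
	cfg := ratelimit.NewCloudProviderRateLimitConfig()
	cfg.Entries["loadBalancerRateLimit"] = &ratelimit.Config{
		CloudProviderRateLimit:            true,
		CloudProviderRateLimitQPS:         5,
		CloudProviderRateLimitBucket:      10,
		CloudProviderRateLimitQPSWrite:    2,
		CloudProviderRateLimitBucketWrite: 5,
	}

	options := &arm.ClientOptions{}
	// NewRateLimitPolicy returns nil when rate limiting is disabled, so guard the append.
	if p := ratelimit.NewRateLimitPolicy(cfg.GetRateLimitConfig("loadBalancerRateLimit")); p != nil {
		options.ClientOptions.PerCallPolicies = append(options.ClientOptions.PerCallPolicies, p)
	}

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := loadbalancerclient.New("<subscription-id>", cred, options); err != nil {
		log.Fatal(err)
	}
}
```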
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryrepectthrottled/throttle.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryrepectthrottled/throttle.go
new file mode 100644
index 0000000000..80c5e33f86
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryrepectthrottled/throttle.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package retryrepectthrottled
+
+import (
+ "errors"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+const HeaderRetryAfter = "Retry-After"
+
+func NewThrottlingPolicy() policy.Policy {
+ return &ThrottlingPolicy{
+ RetryAfterReader: time.Now(),
+ RetryAfterWriter: time.Now(),
+ }
+}
+
+func GetRetriableStatusCode() []int {
+ return []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ }
+}
+
+// ThrottlingPolicy implements the Azure SDK for Go's Policy interface.
+// The throttle deadlines are tracked per operation type (read/write), per subscription.
+type ThrottlingPolicy struct {
+ RetryAfterReader time.Time
+ RetryAfterWriter time.Time
+}
+
+func (p *ThrottlingPolicy) Do(req *policy.Request) (*http.Response, error) {
+ if req.Raw().Method == http.MethodGet || req.Raw().Method == http.MethodHead {
+ return p.processThrottlePolicy(&p.RetryAfterReader, req)
+ }
+ return p.processThrottlePolicy(&p.RetryAfterWriter, req)
+}
+
+func (p *ThrottlingPolicy) processThrottlePolicy(timer *time.Time, req *policy.Request) (*http.Response, error) {
+ if timer.After(time.Now()) {
+ return nil, errors.New("ThrottlingPolicy: Too many requests")
+ }
+ resp, err := req.Next()
+ if err != nil {
+ return resp, err
+ }
+ if runtime.HasStatusCode(resp, http.StatusTooManyRequests) {
+		// The throttle policy is triggered when the response status code is 429.
+		// In the v1 client, throttling was triggered whenever the Retry-After header was set;
+		// per https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/async-operations,
+		// that header is also set when the response status code is 202.
+ duration := resp.Header.Get(HeaderRetryAfter)
+ if duration == "" {
+ *timer = time.Now()
+ }
+ if retryAfter, _ := strconv.Atoi(duration); retryAfter > 0 {
+ *timer = time.Now().Add(time.Duration(retryAfter) * time.Second)
+ } else if t, err := time.Parse(time.RFC1123, duration); err == nil {
+ *timer = t
+ }
+
+ return resp, errors.New("ThrottlingPolicy: Too many requests")
+ }
+ return resp, nil
+}
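
How this policy is attached to a client is not part of this file; the sketch below assumes the same `PerCallPolicies` wiring used for the rate limit policy. Once a 429 with a Retry-After value is observed, every request of that operation class fails fast until the deadline passes.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"

	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryrepectthrottled"
	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient"
)

func main() {
	options := &arm.ClientOptions{}
	// Assumed wiring: share one ThrottlingPolicy per client so the read/write
	// Retry-After deadlines persist across requests.
	options.ClientOptions.PerCallPolicies = append(
		options.ClientOptions.PerCallPolicies, retryrepectthrottled.NewThrottlingPolicy())

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := virtualnetworkclient.New("<subscription-id>", cred, options); err != nil {
		log.Fatal(err)
	}
}
```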
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/interface.go
new file mode 100644
index 0000000000..9fff0e214f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/interface.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package privateendpointclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate,resource=PrivateEndpoint,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=PrivateEndpointsClient,expand=true,rateLimitKey=privateEndpointRateLimit
+type Interface interface {
+ utils.GetWithExpandFunc[armnetwork.PrivateEndpoint]
+ utils.CreateOrUpdateFunc[armnetwork.PrivateEndpoint]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/zz_generated_client.go
new file mode 100644
index 0000000000..a1b423bb0f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privateendpointclient/zz_generated_client.go
@@ -0,0 +1,70 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package privateendpointclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.PrivateEndpointsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewPrivateEndpointsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the PrivateEndpoint
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.PrivateEndpoint, rerr error) {
+ var ops *armnetwork.PrivateEndpointsClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.PrivateEndpointsClientGetOptions{Expand: expand}
+ }
+ resp, err := client.PrivateEndpointsClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.PrivateEndpoint, nil
+}
+
+// CreateOrUpdate creates or updates a PrivateEndpoint.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PrivateEndpoint) (*armnetwork.PrivateEndpoint, error) {
+ resp, err := utils.NewPollerWrapper(client.PrivateEndpointsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.PrivateEndpoint, nil
+ }
+ return nil, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/interface.go
new file mode 100644
index 0000000000..a152277a31
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package privatelinkserviceclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=PrivateLinkService,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=PrivateLinkServicesClient,expand=true,rateLimitKey=privateLinkServiceRateLimit
+type Interface interface {
+ utils.GetWithExpandFunc[armnetwork.PrivateLinkService]
+ utils.CreateOrUpdateFunc[armnetwork.PrivateLinkService]
+ utils.DeleteFunc[armnetwork.PrivateLinkService]
+ utils.ListFunc[armnetwork.PrivateLinkService]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/zz_generated_client.go
new file mode 100644
index 0000000000..40f15c886a
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatelinkserviceclient/zz_generated_client.go
@@ -0,0 +1,89 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package privatelinkserviceclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.PrivateLinkServicesClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewPrivateLinkServicesClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the PrivateLinkService
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.PrivateLinkService, rerr error) {
+ var ops *armnetwork.PrivateLinkServicesClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.PrivateLinkServicesClientGetOptions{Expand: expand}
+ }
+ resp, err := client.PrivateLinkServicesClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+ //handle statuscode
+ return &resp.PrivateLinkService, nil
+}
+
+// CreateOrUpdate creates or updates a PrivateLinkService.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PrivateLinkService) (*armnetwork.PrivateLinkService, error) {
+ resp, err := utils.NewPollerWrapper(client.PrivateLinkServicesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.PrivateLinkService, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a PrivateLinkService by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of PrivateLinkServices in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.PrivateLinkService, rerr error) {
+ pager := client.PrivateLinkServicesClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/interface.go
new file mode 100644
index 0000000000..edb3b93ed5
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/interface.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package privatezoneclient
+
+import (
+ armprivatedns "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate,resource=PrivateZone,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns,packageAlias=armprivatedns,clientName=PrivateZonesClient,expand=false,rateLimitKey=privateDNSRateLimit
+type Interface interface {
+ utils.GetFunc[armprivatedns.PrivateZone]
+ utils.CreateOrUpdateFunc[armprivatedns.PrivateZone]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/zz_generated_client.go
new file mode 100644
index 0000000000..52cd3d4f6e
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/privatezoneclient/zz_generated_client.go
@@ -0,0 +1,67 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package privatezoneclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armprivatedns "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/privatedns/armprivatedns"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armprivatedns.PrivateZonesClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armprivatedns.NewPrivateZonesClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the PrivateZone
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armprivatedns.PrivateZone, rerr error) {
+
+ resp, err := client.PrivateZonesClient.Get(ctx, resourceGroupName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.PrivateZone, nil
+}
+
+// CreateOrUpdate creates or updates a PrivateZone.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armprivatedns.PrivateZone) (*armprivatedns.PrivateZone, error) {
+ resp, err := utils.NewPollerWrapper(client.PrivateZonesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.PrivateZone, nil
+ }
+ return nil, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/interface.go
new file mode 100644
index 0000000000..36e5184366
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package publicipaddressclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=PublicIPAddress,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=PublicIPAddressesClient,expand=true,rateLimitKey=publicIPAddressRateLimit
+type Interface interface {
+ utils.GetWithExpandFunc[armnetwork.PublicIPAddress]
+ utils.CreateOrUpdateFunc[armnetwork.PublicIPAddress]
+ utils.DeleteFunc[armnetwork.PublicIPAddress]
+ utils.ListFunc[armnetwork.PublicIPAddress]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/zz_generated_client.go
new file mode 100644
index 0000000000..07b7b023f0
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipaddressclient/zz_generated_client.go
@@ -0,0 +1,89 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package publicipaddressclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.PublicIPAddressesClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewPublicIPAddressesClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the PublicIPAddress
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.PublicIPAddress, rerr error) {
+ var ops *armnetwork.PublicIPAddressesClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.PublicIPAddressesClientGetOptions{Expand: expand}
+ }
+ resp, err := client.PublicIPAddressesClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.PublicIPAddress, nil
+}
+
+// CreateOrUpdate creates or updates a PublicIPAddress.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PublicIPAddress) (*armnetwork.PublicIPAddress, error) {
+ resp, err := utils.NewPollerWrapper(client.PublicIPAddressesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.PublicIPAddress, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a PublicIPAddress by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of PublicIPAddress in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.PublicIPAddress, rerr error) {
+ pager := client.PublicIPAddressesClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
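+
+// exampleListPublicIPAddresses is an illustrative sketch, not part of the
+// generated client: it shows how a caller (for instance a test helper) might
+// construct this wrapper and list addresses. The subscription ID, credential
+// and resource group name are assumed inputs supplied by the caller.
+func exampleListPublicIPAddresses(ctx context.Context, subscriptionID string, cred azcore.TokenCredential) ([]*armnetwork.PublicIPAddress, error) {
+	// Passing nil options falls back to utils.GetDefaultOption().
+	c, err := New(subscriptionID, cred, nil)
+	if err != nil {
+		return nil, err
+	}
+	return c.List(ctx, "example-resource-group")
+}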
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/interface.go
new file mode 100644
index 0000000000..3f02160a51
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package publicipprefixclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=PublicIPPrefix,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=PublicIPPrefixesClient,expand=true
+type Interface interface {
+ utils.GetWithExpandFunc[armnetwork.PublicIPPrefix]
+ utils.CreateOrUpdateFunc[armnetwork.PublicIPPrefix]
+ utils.DeleteFunc[armnetwork.PublicIPPrefix]
+ utils.ListFunc[armnetwork.PublicIPPrefix]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/zz_generated_client.go
new file mode 100644
index 0000000000..4967d4b914
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/publicipprefixclient/zz_generated_client.go
@@ -0,0 +1,89 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package publicipprefixclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.PublicIPPrefixesClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewPublicIPPrefixesClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the PublicIPPrefix
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.PublicIPPrefix, rerr error) {
+ var ops *armnetwork.PublicIPPrefixesClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.PublicIPPrefixesClientGetOptions{Expand: expand}
+ }
+ resp, err := client.PublicIPPrefixesClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.PublicIPPrefix, nil
+}
+
+// CreateOrUpdate creates or updates a PublicIPPrefix.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.PublicIPPrefix) (*armnetwork.PublicIPPrefix, error) {
+ resp, err := utils.NewPollerWrapper(client.PublicIPPrefixesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.PublicIPPrefix, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a PublicIPPrefix by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of PublicIPPrefix in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.PublicIPPrefix, rerr error) {
+ pager := client.PublicIPPrefixesClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/custom.go
new file mode 100644
index 0000000000..5c55020c5b
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/custom.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resourcegroupclient
+
+import (
+ "context"
+
+ resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// Get gets the ResourceGroup.
+func (client *Client) Get(ctx context.Context, resourceGroupName string) (result *resources.ResourceGroup, rerr error) {
+ resp, err := client.ResourceGroupsClient.Get(ctx, resourceGroupName, nil)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+	return &resp.ResourceGroup, nil
+}
+
+// CreateOrUpdate creates or updates a ResourceGroup.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceParam resources.ResourceGroup) (*resources.ResourceGroup, error) {
+ resp, err := client.ResourceGroupsClient.CreateOrUpdate(ctx, resourceGroupName, resourceParam, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &resp.ResourceGroup, nil
+}
+
+// Delete deletes a ResourceGroup by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List lists the ResourceGroups in the subscription.
+func (client *Client) List(ctx context.Context) (result []*resources.ResourceGroup, rerr error) {
+ pager := client.ResourceGroupsClient.NewListPager(nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/interface.go
new file mode 100644
index 0000000000..96336d3a1b
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package resourcegroupclient
+
+import (
+ "context"
+
+ resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+)
+
+// +azure:client:verbs=,resource=ResourceGroup,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources,packageAlias=resources,clientName=ResourceGroupsClient,expand=false
+type Interface interface {
+ Get(ctx context.Context, resourceGroupName string) (result *resources.ResourceGroup, rerr error)
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceParam resources.ResourceGroup) (*resources.ResourceGroup, error)
+ Delete(ctx context.Context, resourceGroupName string) error
+ List(ctx context.Context) (result []*resources.ResourceGroup, rerr error)
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/zz_generated_client.go
new file mode 100644
index 0000000000..75fabdb727
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/resourcegroupclient/zz_generated_client.go
@@ -0,0 +1,42 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package resourcegroupclient
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ resources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *resources.ResourceGroupsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := resources.NewResourceGroupsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/interface.go
new file mode 100644
index 0000000000..c99d456d50
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/interface.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package routetableclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=createorupdate;delete,resource=RouteTable,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=RouteTablesClient,expand=false,rateLimitKey=routeTableRateLimit
+type Interface interface {
+ utils.CreateOrUpdateFunc[armnetwork.RouteTable]
+ utils.DeleteFunc[armnetwork.RouteTable]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/zz_generated_client.go
new file mode 100644
index 0000000000..f671cfb264
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/routetableclient/zz_generated_client.go
@@ -0,0 +1,62 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package routetableclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.RouteTablesClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewRouteTablesClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// CreateOrUpdate creates or updates a RouteTable.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.RouteTable) (*armnetwork.RouteTable, error) {
+ resp, err := utils.NewPollerWrapper(client.RouteTablesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.RouteTable, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a RouteTable by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/interface.go
new file mode 100644
index 0000000000..b243b34a60
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package securitygroupclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=SecurityGroup,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=SecurityGroupsClient,expand=false,rateLimitKey=securityGroupRateLimit
+type Interface interface {
+ utils.GetFunc[armnetwork.SecurityGroup]
+ utils.CreateOrUpdateFunc[armnetwork.SecurityGroup]
+ utils.DeleteFunc[armnetwork.SecurityGroup]
+ utils.ListFunc[armnetwork.SecurityGroup]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/zz_generated_client.go
new file mode 100644
index 0000000000..a5107279f6
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient/zz_generated_client.go
@@ -0,0 +1,86 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package securitygroupclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.SecurityGroupsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewSecurityGroupsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the SecurityGroup
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armnetwork.SecurityGroup, rerr error) {
+
+ resp, err := client.SecurityGroupsClient.Get(ctx, resourceGroupName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.SecurityGroup, nil
+}
+
+// CreateOrUpdate creates or updates a SecurityGroup.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.SecurityGroup) (*armnetwork.SecurityGroup, error) {
+ resp, err := utils.NewPollerWrapper(client.SecurityGroupsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.SecurityGroup, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a SecurityGroup by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of SecurityGroup in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.SecurityGroup, rerr error) {
+ pager := client.SecurityGroupsClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/custom.go
new file mode 100644
index 0000000000..d34163d92f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/custom.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package snapshotclient
+
+import (
+ "context"
+
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+)
+
+// List gets a list of Snapshot in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.Snapshot, rerr error) {
+ pager := client.SnapshotsClient.NewListByResourceGroupPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go
new file mode 100644
index 0000000000..1cae1fb807
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package snapshotclient
+
+import (
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete,resource=Snapshot,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=SnapshotsClient,expand=false,rateLimitKey=snapshotRateLimit
+type Interface interface {
+ utils.GetFunc[armcompute.Snapshot]
+ utils.CreateOrUpdateFunc[armcompute.Snapshot]
+ utils.DeleteFunc[armcompute.Snapshot]
+ utils.ListFunc[armcompute.Snapshot]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/zz_generated_client.go
new file mode 100644
index 0000000000..d395d03319
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/snapshotclient/zz_generated_client.go
@@ -0,0 +1,73 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package snapshotclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armcompute.SnapshotsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armcompute.NewSnapshotsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the Snapshot
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.Snapshot, rerr error) {
+
+ resp, err := client.SnapshotsClient.Get(ctx, resourceGroupName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.Snapshot, nil
+}
+
+// CreateOrUpdate creates or updates a Snapshot.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.Snapshot) (*armcompute.Snapshot, error) {
+ resp, err := utils.NewPollerWrapper(client.SnapshotsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.Snapshot, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a Snapshot by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/custom.go
new file mode 100644
index 0000000000..192ab84b28
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/custom.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sshpublickeyresourceclient
+
+import (
+ "context"
+
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+)
+
+// Delete deletes a SSHPublicKeyResource by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := client.SSHPublicKeysClient.Delete(ctx, resourceGroupName, resourceName, nil)
+ return err
+}
+
+// GenerateKeyPair generates and returns an SSH key pair for the named SSH public key resource.
+func (client *Client) GenerateKeyPair(ctx context.Context, resourceGroupName string, sshPublicKeyName string) (*armcompute.SSHPublicKeyGenerateKeyPairResult, error) {
+ resp, err := client.SSHPublicKeysClient.GenerateKeyPair(ctx, resourceGroupName, sshPublicKeyName, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &resp.SSHPublicKeyGenerateKeyPairResult, nil
+}
+
+// Create creates an SSHPublicKeyResource.
+func (client *Client) Create(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters armcompute.SSHPublicKeyResource) (*armcompute.SSHPublicKeyResource, error) {
+ resp, err := client.SSHPublicKeysClient.Create(ctx, resourceGroupName, sshPublicKeyName, parameters, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &resp.SSHPublicKeyResource, nil
+}
+
+// Update updates an SSHPublicKeyResource.
+func (client *Client) Update(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters armcompute.SSHPublicKeyUpdateResource) (*armcompute.SSHPublicKeyResource, error) {
+ resp, err := client.SSHPublicKeysClient.Update(ctx, resourceGroupName, sshPublicKeyName, parameters, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &resp.SSHPublicKeyResource, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/interface.go
new file mode 100644
index 0000000000..f3ae3fc18f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/interface.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package sshpublickeyresourceclient
+
+import (
+ "context"
+
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;listbyrg,resource=SSHPublicKeyResource,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=SSHPublicKeysClient,expand=false
+type Interface interface {
+ utils.GetFunc[armcompute.SSHPublicKeyResource]
+ utils.DeleteFunc[armcompute.SSHPublicKeyResource]
+ utils.ListFunc[armcompute.SSHPublicKeyResource]
+
+ Create(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters armcompute.SSHPublicKeyResource) (*armcompute.SSHPublicKeyResource, error)
+ Update(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters armcompute.SSHPublicKeyUpdateResource) (*armcompute.SSHPublicKeyResource, error)
+ GenerateKeyPair(ctx context.Context, resourceGroupName string, sshPublicKeyName string) (*armcompute.SSHPublicKeyGenerateKeyPairResult, error)
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/zz_generated_client.go
new file mode 100644
index 0000000000..df7437c174
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/sshpublickeyresourceclient/zz_generated_client.go
@@ -0,0 +1,68 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package sshpublickeyresourceclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armcompute.SSHPublicKeysClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armcompute.NewSSHPublicKeysClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the SSHPublicKeyResource
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.SSHPublicKeyResource, rerr error) {
+
+ resp, err := client.SSHPublicKeysClient.Get(ctx, resourceGroupName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.SSHPublicKeyResource, nil
+}
+
+// List gets a list of SSHPublicKeyResource in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.SSHPublicKeyResource, rerr error) {
+ pager := client.SSHPublicKeysClient.NewListByResourceGroupPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/interface.go
new file mode 100644
index 0000000000..e2ddb888c0
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package subnetclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=VirtualNetwork,subResource=Subnet,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=SubnetsClient,expand=true,rateLimitKey=subnetsRateLimit
+type Interface interface {
+ utils.SubResourceGetWithExpandFunc[armnetwork.Subnet]
+ utils.SubResourceCreateOrUpdateFunc[armnetwork.Subnet]
+ utils.SubResourceDeleteFunc[armnetwork.Subnet]
+ utils.SubResourceListFunc[armnetwork.Subnet]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/zz_generated_client.go
new file mode 100644
index 0000000000..00880b213f
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient/zz_generated_client.go
@@ -0,0 +1,89 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package subnetclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.SubnetsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewSubnetsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the Subnet
+func (client *Client) Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string, expand *string) (result *armnetwork.Subnet, rerr error) {
+ var ops *armnetwork.SubnetsClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.SubnetsClientGetOptions{Expand: expand}
+ }
+ resp, err := client.SubnetsClient.Get(ctx, resourceGroupName, parentResourceName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.Subnet, nil
+}
+
+// CreateOrUpdate creates or updates a Subnet.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parentResourceName string, resource armnetwork.Subnet) (*armnetwork.Subnet, error) {
+ resp, err := utils.NewPollerWrapper(client.SubnetsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, parentResourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.Subnet, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a Subnet by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, parentResourceName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of Subnet in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string, parentResourceName string) (result []*armnetwork.Subnet, rerr error) {
+ pager := client.SubnetsClient.NewListPager(resourceGroupName, parentResourceName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/armbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/armbalancer.go
new file mode 100644
index 0000000000..b016122439
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/armbalancer.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package armbalancer
+
+import (
+ "context"
+ "net/http"
+)
+
+type Options struct {
+ Transport *http.Transport
+
+ // PoolSize is the max number of connections that will be created by the connection pool.
+ // Default: 8
+ PoolSize int
+
+ // RecycleThreshold is the lowest value of any X-Ms-Ratelimit-Remaining-* header that
+ // can be seen before the associated connection will be re-established.
+ // Default: 100
+ RecycleThreshold int64
+
+ // MinReqsBeforeRecycle is a safeguard to prevent frequent connection churn in the unlikely event
+// that a connection lands on an ARM instance that already has a depleted rate limiting quota.
+ // Default: 10
+ MinReqsBeforeRecycle int64
+}
+
+// New wraps a transport to provide smart connection pooling and client-side load balancing.
+func New(ctx context.Context, opts Options) http.RoundTripper {
+ if opts.Transport == nil {
+ opts.Transport = http.DefaultTransport.(*http.Transport)
+ }
+
+ if opts.PoolSize == 0 {
+ opts.PoolSize = 8
+ }
+
+ if opts.RecycleThreshold == 0 {
+ opts.RecycleThreshold = 100
+ }
+ if opts.MinReqsBeforeRecycle == 0 {
+ opts.MinReqsBeforeRecycle = 10
+ }
+
+ return NewHostScopedTransport(ctx, func() *transportChannPool {
+ return newtransportChannPool(opts.PoolSize, func() Transport {
+ return &ClosableTransport{
+ Transport: opts.Transport.Clone(),
+ }
+ }, &KillBeforeThrottledPolicy{opts.RecycleThreshold})
+ })
+}
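+
+// newBalancedHTTPClient is an illustrative sketch, not part of the vendored
+// package: it shows how the balancer might be wired into an *http.Client.
+// The option values repeat the documented defaults purely for readability.
+func newBalancedHTTPClient(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: New(ctx, Options{
+			PoolSize:             8,
+			RecycleThreshold:     100,
+			MinReqsBeforeRecycle: 10,
+		}),
+	}
+}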
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/host_scoped_transport.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/host_scoped_transport.go
new file mode 100644
index 0000000000..eb0986c4e1
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/host_scoped_transport.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package armbalancer
+
+import (
+ "context"
+ "net/http"
+ "strings"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+)
+
+// NewHostScopedTransport returns a Transport that lazily maintains a separate transport pool per request host.
+func NewHostScopedTransport(ctx context.Context, transportFactory func() *transportChannPool) Transport {
+ ctx, cancelFn := context.WithCancel(ctx)
+ transport := &hostScopedTransport{
+ ctx: ctx,
+ cancelFn: cancelFn,
+ transportMap: sync.Map{},
+ transportFactory: transportFactory,
+ }
+ transport.serverGrp.SetLimit(-1)
+ return transport
+}
+
+type hostScopedTransport struct {
+ ctx context.Context
+ cancelFn context.CancelFunc
+ transportMap sync.Map
+ transportFactory func() *transportChannPool
+ serverGrp errgroup.Group
+}
+
+func (hostScopedTransport *hostScopedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ transportRaw, ok := hostScopedTransport.transportMap.Load(strings.ToLower(req.Host))
+ if !ok {
+ transportPool := hostScopedTransport.transportFactory()
+ hostScopedTransport.serverGrp.Go(func() error { return transportPool.Run(hostScopedTransport.ctx) })
+ hostScopedTransport.transportMap.Store(strings.ToLower(req.Host), transportPool)
+ transportRaw = transportPool
+ }
+ transport := transportRaw.(Transport)
+ return transport.RoundTrip(req)
+}
+
+func (hostScopedTransport *hostScopedTransport) ForceClose() error {
+ hostScopedTransport.cancelFn()
+ return hostScopedTransport.serverGrp.Wait()
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/transport.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/transport.go
new file mode 100644
index 0000000000..4def516853
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/transport.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package armbalancer
+
+import (
+ "net/http"
+)
+
+var _ Transport = &ClosableTransport{}
+
+type ClosableTransport struct {
+ *http.Transport
+}
+
+func (transport *ClosableTransport) ForceClose() error {
+ transport.Transport.CloseIdleConnections()
+ return nil
+}
+
+func (transport *ClosableTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return transport.Transport.RoundTrip(req)
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/transport_pool.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/transport_pool.go
new file mode 100644
index 0000000000..b2eb483ee4
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/armbalancer/transport_pool.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package armbalancer
+
+import (
+ "context"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+)
+
+type transportChannPool struct {
+ sync.WaitGroup
+ capacity chan struct{}
+ pool chan Transport
+ transportFactory func() Transport
+ transportDropPolicy []TransportDropPolicy
+}
+
+// TransportDropPolicy decides, from a response's headers, whether the transport that served the request should be dropped and replaced.
+type TransportDropPolicy interface {
+ ShouldDropTransport(header http.Header) bool
+}
+
+// TransportDropPolicyFunc adapts an ordinary function to the TransportDropPolicy interface.
+type TransportDropPolicyFunc func(header http.Header) bool
+
+func (function TransportDropPolicyFunc) ShouldDropTransport(header http.Header) bool {
+ if function == nil {
+ return false
+ }
+ return function(header)
+}
+
+type Transport interface {
+ http.RoundTripper
+ ForceClose() error
+}
+
+func newtransportChannPool(size int, transportFactory func() Transport, dropPolicy ...TransportDropPolicy) *transportChannPool {
+ if size <= 0 {
+ return nil
+ }
+ pool := &transportChannPool{
+ capacity: make(chan struct{}, size),
+ pool: make(chan Transport, size),
+ transportFactory: transportFactory,
+ transportDropPolicy: dropPolicy,
+ }
+ return pool
+}
+
+func (pool *transportChannPool) Run(ctx context.Context) error {
+CLEANUP:
+ for {
+ select {
+ case <-ctx.Done():
+ break CLEANUP
+ case pool.capacity <- struct{}{}:
+ pool.pool <- pool.transportFactory()
+ }
+ }
+
+	// cleanup
+	close(pool.capacity) // no more transports will be added; blocked consumers are released once the channel is closed.
+ errGroup := new(errgroup.Group)
+ errGroup.Go(func() error {
+ pool.Wait() // wait for transport recycle loop
+		close(pool.pool) // no more transports will be added; blocked consumers are released once the channel is closed.
+ return nil
+ })
+ for transport := range pool.pool {
+ transport := transport
+ errGroup.Go(transport.ForceClose)
+ }
+	return errGroup.Wait() // close all of the transports in the pool
+}
+
+func (pool *transportChannPool) RoundTrip(req *http.Request) (*http.Response, error) {
+ transport, err := pool.selectTransport(req)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := transport.RoundTrip(req)
+ var header http.Header
+ if resp != nil {
+ header = resp.Header.Clone()
+ }
+ pool.Add(1)
+ go pool.recycleTransport(transport, header)
+ return resp, err
+}
+
+func (pool *transportChannPool) selectTransport(req *http.Request) (Transport, error) {
+ for {
+ var t Transport
+ var ok bool
+ select {
+ case t, ok = <-pool.pool:
+ if !ok {
+ return nil, http.ErrServerClosed
+ }
+ return t, nil
+ case <-req.Context().Done():
+ return nil, http.ErrServerClosed
+ }
+ }
+}
+
+func (pool *transportChannPool) recycleTransport(t Transport, header http.Header) {
+ defer pool.Done()
+ for _, policy := range pool.transportDropPolicy {
+ if policy.ShouldDropTransport(header) {
+ t.ForceClose() // drop the transport
+			<-pool.capacity // free a capacity slot so the Run loop can create a replacement transport
+ return
+ }
+ }
+ pool.pool <- t
+}
+
+func (pool *transportChannPool) ForceClose() error {
+ close(pool.pool)
+ return nil
+}
+
+type KillBeforeThrottledPolicy struct {
+ RecycleThreshold int64
+}
+
+func (policy *KillBeforeThrottledPolicy) ShouldDropTransport(header http.Header) bool {
+ for key, vals := range header {
+ if !strings.HasPrefix(key, "X-Ms-Ratelimit-Remaining-") {
+ continue
+ }
+ n, err := strconv.ParseInt(vals[0], 10, 0)
+ if err != nil {
+ continue
+ }
+ if n < policy.RecycleThreshold {
+ return true
+ }
+ }
+ return false
+}
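
For orientation, a minimal sketch of how the pieces above fit together; this is package-internal (newtransportChannPool is unexported), and the factory and header name below are illustrative, not taken from this patch:

    // Build a pool of 8 transports that are recycled before ARM throttling kicks in.
    factory := func() Transport { return newHostScopedTransport() } // hypothetical factory
    sweep := TransportDropPolicyFunc(func(h http.Header) bool {
        return h.Get("X-Ms-Ratelimit-Remaining-Subscription-Reads") == "0" // illustrative header
    })
    pool := newtransportChannPool(8, factory, sweep, &KillBeforeThrottledPolicy{RecycleThreshold: 100})
    go func() { _ = pool.Run(context.Background()) }() // keeps the pool filled and owns cleanup
    client := &http.Client{Transport: pool}            // the pool itself is an http.RoundTripper
    _ = client

Run tops the pool up to its capacity; recycleTransport either returns a transport to the pool or, when a drop policy fires, force-closes it and frees a capacity slot so Run builds a fresh one.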
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go
new file mode 100644
index 0000000000..59ddc43f34
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/const.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+const (
+ AzureClientID = "AZURE_CLIENT_ID"
+ AZURECLIENTSECRET = "AZURE_CLIENT_SECRET" //nolint:gosec
+ AzureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE"
+ AzureTenantID = "AZURE_TENANT_ID"
+)
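
These names mirror the environment variables the azidentity credential types read; a minimal sketch of how test setup code might consume them (the guard is hypothetical):

    clientID := os.Getenv(AzureClientID)
    tenantID := os.Getenv(AzureTenantID)
    if clientID == "" || tenantID == "" {
        // fail fast in test setup; both are required for client secret / workload identity credentials
    }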
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/interface.go
new file mode 100644
index 0000000000..02ca3b1209
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/interface.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import "context"
+
+// GetFunc gets a service resource by name.
+type GetFunc[Type interface{}] interface {
+ Get(ctx context.Context, resourceGroupName string, resourceName string) (result *Type, rerr error)
+}
+
+// SubResourceGetFunc gets a service resource nested under a parent resource.
+type SubResourceGetFunc[Type interface{}] interface {
+ Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (result *Type, rerr error)
+}
+
+// GetWithExpandFunc gets a service resource by name, with an optional expand parameter.
+type GetWithExpandFunc[Type interface{}] interface {
+ Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *Type, rerr error)
+}
+
+// SubResourceGetWithExpandFunc gets a nested service resource, with an optional expand parameter.
+type SubResourceGetWithExpandFunc[Type interface{}] interface {
+ Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string, expand *string) (result *Type, rerr error)
+}
+
+// ListFunc lists the service resources in a resource group.
+type ListFunc[Type interface{}] interface {
+ List(ctx context.Context, resourceGroupName string) (result []*Type, rerr error)
+}
+
+// SubResourceListFunc lists the service resources nested under a parent resource.
+type SubResourceListFunc[Type interface{}] interface {
+ List(ctx context.Context, resourceGroupName string, parentResourceName string) (result []*Type, rerr error)
+}
+
+// CreateOrUpdateFunc creates or updates a service resource.
+type CreateOrUpdateFunc[Type interface{}] interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resourceParam Type) (*Type, error)
+}
+
+// SubResourceCreateOrUpdateFunc creates or updates a service resource nested under a parent resource.
+type SubResourceCreateOrUpdateFunc[Type interface{}] interface {
+ CreateOrUpdate(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string, resourceParam Type) (*Type, error)
+}
+
+// DeleteFunc deletes a service resource by name.
+type DeleteFunc[Type interface{}] interface {
+ Delete(ctx context.Context, resourceGroupName string, resourceName string) error
+}
+
+// SubResourceDeleteFunc deletes a service resource nested under a parent resource, by name.
+type SubResourceDeleteFunc[Type interface{}] interface {
+ Delete(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) error
+}
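
Because each verb is a single-method generic interface, shared helpers can be written once and reused across every resource client in this tree; a minimal sketch with a hypothetical helper:

    // retryGet is a hypothetical helper constrained by GetFunc; it works for any
    // client that exposes Get(ctx, resourceGroup, name).
    func retryGet[T any](ctx context.Context, client GetFunc[T], resourceGroup, name string, attempts int) (*T, error) {
        var lastErr error
        for i := 0; i < attempts; i++ {
            result, err := client.Get(ctx, resourceGroup, name)
            if err == nil {
                return result, nil
            }
            lastErr = err
        }
        return nil, lastErr
    }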
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go
new file mode 100644
index 0000000000..4a1378139b
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/options.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/policy/retryrepectthrottled"
+)
+
+var TracingProvider tracing.Provider
+
+const (
+ DefaultMaxRetries = 3
+ DefaultMaxRetryDelay = 60 * time.Second
+ DefaultRetryDelay = 5 * time.Second
+ DefaultTryTimeout = 10 * time.Second
+)
+
+func GetDefaultOption() *arm.ClientOptions {
+ return &arm.ClientOptions{
+ ClientOptions: policy.ClientOptions{
+ Retry: policy.RetryOptions{
+ RetryDelay: DefaultRetryDelay,
+ MaxRetryDelay: DefaultMaxRetryDelay,
+ MaxRetries: DefaultMaxRetries,
+ TryTimeout: DefaultTryTimeout,
+ StatusCodes: retryrepectthrottled.GetRetriableStatusCode(),
+ },
+ PerRetryPolicies: []policy.Policy{
+ retryrepectthrottled.NewThrottlingPolicy(),
+ },
+ Transport: &http.Client{
+ Transport: DefaultTransport,
+ },
+ TracingProvider: TracingProvider,
+ },
+ }
+}
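
Callers that need something other than the defaults can start from GetDefaultOption and override individual fields before passing the options to an SDK client constructor; a minimal sketch (subscriptionID and credential are assumed to exist):

    opts := GetDefaultOption()
    opts.Retry.MaxRetries = 5 // override a single knob, keep the throttling-aware policy and shared transport
    client, err := armcompute.NewVirtualMachinesClient(subscriptionID, credential, opts)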
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/policy_wrapper.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/policy_wrapper.go
new file mode 100644
index 0000000000..d59d6016c9
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/policy_wrapper.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+type FuncPolicyWrapper func(req *policy.Request) (*http.Response, error)
+
+func (f FuncPolicyWrapper) Do(req *policy.Request) (*http.Response, error) {
+ if f != nil {
+ return f(req)
+ }
+ return nil, errors.New("policy func is nil")
+}
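
FuncPolicyWrapper adapts a plain function to azcore's policy.Policy interface, which is handy for small per-call hooks in tests; a sketch with an illustrative header:

    tracePolicy := FuncPolicyWrapper(func(req *policy.Request) (*http.Response, error) {
        req.Raw().Header.Set("X-Example-Trace", "e2e") // illustrative header, not used by ARM
        return req.Next()                              // continue down the pipeline
    })
    opts := GetDefaultOption()
    opts.PerCallPolicies = append(opts.PerCallPolicies, tracePolicy)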
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/poller.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/poller.go
new file mode 100644
index 0000000000..63a9300884
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/poller.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+func NewPollerWrapper[ResponseType interface{}](poller *runtime.Poller[ResponseType], err error) *PollerWrapper[ResponseType] {
+ return &PollerWrapper[ResponseType]{
+ poller: poller,
+ err: err,
+ }
+}
+
+type PollerWrapper[ResponseType interface{}] struct {
+ poller *runtime.Poller[ResponseType]
+ err error
+}
+
+// WaitforPollerResp blocks until the long-running operation tracked by the poller
+// completes, and returns the final response. It assumes the poller eventually terminates.
+func (handler *PollerWrapper[ResponseType]) WaitforPollerResp(ctx context.Context) (result *ResponseType, err error) {
+ if handler.err != nil {
+ return nil, handler.err
+ }
+ if handler.poller == nil {
+ return nil, errors.New("poller is nil")
+ }
+ resp, err := handler.poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{
+ Frequency: time.Second * 15,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
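
The wrapper lets the generated clients below chain a Begin* call straight into a blocking wait; a one-line sketch with a hypothetical client and names:

    resp, err := NewPollerWrapper(vnetClient.BeginDelete(ctx, "rg", "vnet", nil)).WaitforPollerResp(ctx)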
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go
new file mode 100644
index 0000000000..bc25231880
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils/transport.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "crypto/tls"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+)
+
+var DefaultTransport *http.Transport
+var once sync.Once
+
+func init() {
+ once.Do(func() {
+ DefaultTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ MaxConnsPerHost: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ },
+ }
+ })
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/custom.go
new file mode 100644
index 0000000000..b3db142863
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/custom.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package virtualmachineclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+)
+
+// Get gets the VirtualMachine
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armcompute.VirtualMachine, rerr error) {
+ var ops *armcompute.VirtualMachinesClientGetOptions
+ if expand != nil {
+ expand := armcompute.InstanceViewTypes(*expand)
+ ops = &armcompute.VirtualMachinesClientGetOptions{Expand: &expand}
+ }
+
+ resp, err := client.VirtualMachinesClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.VirtualMachine, nil
+}
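
The expand parameter is forwarded as an armcompute.InstanceViewTypes value, so a caller that wants the instance view passes its string form; a short sketch (client and names are placeholders):

    expand := string(armcompute.InstanceViewTypesInstanceView) // "instanceView"
    vm, err := vmClient.Get(ctx, "<resource-group>", "<vm-name>", &expand)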
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go
new file mode 100644
index 0000000000..ef228f6403
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package virtualmachineclient
+
+import (
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=createorupdate;delete;list,resource=VirtualMachine,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=VirtualMachinesClient,expand=true,rateLimitKey=virtualMachineRateLimit
+type Interface interface {
+ utils.GetWithExpandFunc[armcompute.VirtualMachine]
+ utils.CreateOrUpdateFunc[armcompute.VirtualMachine]
+ utils.DeleteFunc[armcompute.VirtualMachine]
+ utils.ListFunc[armcompute.VirtualMachine]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/zz_generated_client.go
new file mode 100644
index 0000000000..b4b099a345
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachineclient/zz_generated_client.go
@@ -0,0 +1,75 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package virtualmachineclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armcompute.VirtualMachinesClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armcompute.NewVirtualMachinesClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// CreateOrUpdate creates or updates a VirtualMachine.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.VirtualMachine) (*armcompute.VirtualMachine, error) {
+ resp, err := utils.NewPollerWrapper(client.VirtualMachinesClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.VirtualMachine, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a VirtualMachine by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of VirtualMachine in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachine, rerr error) {
+ pager := client.VirtualMachinesClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
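
In the e2e helpers this client is typically built from an azidentity credential; a minimal sketch (subscription, group, and VM names are placeholders):

    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        // handle error
    }
    vmClient, err := virtualmachineclient.New("<subscription-id>", cred, nil) // nil options fall back to utils.GetDefaultOption()
    if err != nil {
        // handle error
    }
    vm, err := vmClient.Get(ctx, "<resource-group>", "<vm-name>", nil)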
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/interface.go
new file mode 100644
index 0000000000..ebc97c029c
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package virtualmachinescalesetclient
+
+import (
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=VirtualMachineScaleSet,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=VirtualMachineScaleSetsClient,expand=false,rateLimitKey=virtualMachineSizesRateLimit
+type Interface interface {
+ utils.GetFunc[armcompute.VirtualMachineScaleSet]
+ utils.CreateOrUpdateFunc[armcompute.VirtualMachineScaleSet]
+ utils.DeleteFunc[armcompute.VirtualMachineScaleSet]
+ utils.ListFunc[armcompute.VirtualMachineScaleSet]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/zz_generated_client.go
new file mode 100644
index 0000000000..4f0c60b600
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetclient/zz_generated_client.go
@@ -0,0 +1,86 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package virtualmachinescalesetclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armcompute.VirtualMachineScaleSetsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armcompute.NewVirtualMachineScaleSetsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the VirtualMachineScaleSet
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string) (result *armcompute.VirtualMachineScaleSet, rerr error) {
+
+ resp, err := client.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.VirtualMachineScaleSet, nil
+}
+
+// CreateOrUpdate creates or updates a VirtualMachineScaleSet.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armcompute.VirtualMachineScaleSet) (*armcompute.VirtualMachineScaleSet, error) {
+ resp, err := utils.NewPollerWrapper(client.VirtualMachineScaleSetsClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.VirtualMachineScaleSet, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a VirtualMachineScaleSet by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of VirtualMachineScaleSet in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armcompute.VirtualMachineScaleSet, rerr error) {
+ pager := client.VirtualMachineScaleSetsClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/custom.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/custom.go
new file mode 100644
index 0000000000..72747bec1d
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/custom.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package virtualmachinescalesetvmclient
+
+import (
+ "context"
+ "errors"
+ "sync"
+
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// Update updates a VirtualMachine.
+func (client *Client) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM) (*armcompute.VirtualMachineScaleSetVM, error) {
+ resp, err := client.UpdateAsync(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.VirtualMachineScaleSetVM, nil
+ }
+ return nil, nil
+}
+
+func (client *Client) UpdateAsync(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM) *utils.PollerWrapper[armcompute.VirtualMachineScaleSetVMsClientUpdateResponse] {
+ return utils.NewPollerWrapper(client.VirtualMachineScaleSetVMsClient.BeginUpdate(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, nil))
+}
+
+func UpdateVMsInBatch(ctx context.Context, client *Client, resourceGroupName string, VMScaleSetName string, instances map[string]armcompute.VirtualMachineScaleSetVM, batchSize int) error {
+	if batchSize <= 0 {
+		return errors.New("batchSize should be greater than 0")
+	}
+
+	if batchSize == 1 {
+		for instanceID, vm := range instances {
+			if _, err := client.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, vm); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	concurrentFence := make(chan struct{}, batchSize) // caps the number of in-flight updates at batchSize
+	errChannel := make(chan error, len(instances))
+	var workerGroup sync.WaitGroup
+	var err error
+DISPATCH:
+	for instanceID, vm := range instances {
+		select {
+		case concurrentFence <- struct{}{}:
+			workerGroup.Add(1)
+			go func(instanceID string, vm armcompute.VirtualMachineScaleSetVM) {
+				defer workerGroup.Done()
+				defer func() { <-concurrentFence }()
+				if _, updateErr := client.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, vm); updateErr != nil {
+					errChannel <- updateErr
+				}
+			}(instanceID, vm)
+		case err = <-errChannel:
+			if err != nil {
+				break DISPATCH // stop dispatching new updates on the first error
+			}
+		}
+	}
+	workerGroup.Wait()
+	close(concurrentFence)
+	close(errChannel)
+	// surface a worker error even if it arrived after dispatching finished
+	for workerErr := range errChannel {
+		if err == nil {
+			err = workerErr
+		}
+	}
+	return err
+}
+
+// List gets a list of VirtualMachineScaleSetVM in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string, parentResourceName string) (result []*armcompute.VirtualMachineScaleSetVM, rerr error) {
+ pager := client.VirtualMachineScaleSetVMsClient.NewListPager(resourceGroupName, parentResourceName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
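
A minimal sketch of the batch helper in use; the instance payloads, client variable, and batch size are illustrative, and at most batchSize updates run concurrently:

    instances := map[string]armcompute.VirtualMachineScaleSetVM{
        "0": updatedVM0, // hypothetical pre-built payloads keyed by instance ID
        "1": updatedVM1,
    }
    err := UpdateVMsInBatch(ctx, vmssVMClient, "<resource-group>", "<vmss-name>", instances, 5)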
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/interface.go
new file mode 100644
index 0000000000..bae4fdff67
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/interface.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package virtualmachinescalesetvmclient
+
+import (
+ "context"
+
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;delete,resource=VirtualMachineScaleSet,subResource=VirtualMachineScaleSetVM,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5,packageAlias=armcompute,clientName=VirtualMachineScaleSetVMsClient,expand=false
+type Interface interface {
+ utils.SubResourceGetFunc[armcompute.VirtualMachineScaleSetVM]
+ utils.SubResourceDeleteFunc[armcompute.VirtualMachineScaleSetVM]
+ utils.SubResourceListFunc[armcompute.VirtualMachineScaleSetVM]
+ // Update updates a VirtualMachineScaleSetVM.
+ Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters armcompute.VirtualMachineScaleSetVM) (*armcompute.VirtualMachineScaleSetVM, error)
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/zz_generated_client.go
new file mode 100644
index 0000000000..3f06f4f965
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualmachinescalesetvmclient/zz_generated_client.go
@@ -0,0 +1,61 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package virtualmachinescalesetvmclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armcompute.VirtualMachineScaleSetVMsClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armcompute.NewVirtualMachineScaleSetVMsClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the VirtualMachineScaleSetVM
+func (client *Client) Get(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) (result *armcompute.VirtualMachineScaleSetVM, rerr error) {
+
+ resp, err := client.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroupName, parentResourceName, resourceName, nil)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.VirtualMachineScaleSetVM, nil
+}
+
+// Delete deletes a VirtualMachineScaleSetVM by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, parentResourceName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, parentResourceName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/interface.go
new file mode 100644
index 0000000000..c9404ba4dc
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/interface.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +azure:enableclientgen:=true
+package virtualnetworkclient
+
+import (
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+// +azure:client:verbs=get;createorupdate;delete;list,resource=VirtualNetwork,packageName=github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4,packageAlias=armnetwork,clientName=VirtualNetworksClient,expand=true
+type Interface interface {
+ utils.GetWithExpandFunc[armnetwork.VirtualNetwork]
+ utils.CreateOrUpdateFunc[armnetwork.VirtualNetwork]
+ utils.DeleteFunc[armnetwork.VirtualNetwork]
+ utils.ListFunc[armnetwork.VirtualNetwork]
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/zz_generated_client.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/zz_generated_client.go
new file mode 100644
index 0000000000..9e83953836
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworkclient/zz_generated_client.go
@@ -0,0 +1,89 @@
+// /*
+// Copyright The Kubernetes Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// */
+
+// Code generated by client-gen. DO NOT EDIT.
+package virtualnetworkclient
+
+import (
+ "context"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+ armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+
+ "sigs.k8s.io/cloud-provider-azure/pkg/azclient/utils"
+)
+
+type Client struct {
+ *armnetwork.VirtualNetworksClient
+}
+
+func New(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (Interface, error) {
+ if options == nil {
+ options = utils.GetDefaultOption()
+ }
+
+ client, err := armnetwork.NewVirtualNetworksClient(subscriptionID, credential, options)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{client}, nil
+}
+
+// Get gets the VirtualNetwork
+func (client *Client) Get(ctx context.Context, resourceGroupName string, resourceName string, expand *string) (result *armnetwork.VirtualNetwork, rerr error) {
+ var ops *armnetwork.VirtualNetworksClientGetOptions
+ if expand != nil {
+ ops = &armnetwork.VirtualNetworksClientGetOptions{Expand: expand}
+ }
+ resp, err := client.VirtualNetworksClient.Get(ctx, resourceGroupName, resourceName, ops)
+ if err != nil {
+ return nil, err
+ }
+	// handle status code
+ return &resp.VirtualNetwork, nil
+}
+
+// CreateOrUpdate creates or updates a VirtualNetwork.
+func (client *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, resource armnetwork.VirtualNetwork) (*armnetwork.VirtualNetwork, error) {
+ resp, err := utils.NewPollerWrapper(client.VirtualNetworksClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, resource, nil)).WaitforPollerResp(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if resp != nil {
+ return &resp.VirtualNetwork, nil
+ }
+ return nil, nil
+}
+
+// Delete deletes a VirtualNetwork by name.
+func (client *Client) Delete(ctx context.Context, resourceGroupName string, resourceName string) error {
+ _, err := utils.NewPollerWrapper(client.BeginDelete(ctx, resourceGroupName, resourceName, nil)).WaitforPollerResp(ctx)
+ return err
+}
+
+// List gets a list of VirtualNetwork in the resource group.
+func (client *Client) List(ctx context.Context, resourceGroupName string) (result []*armnetwork.VirtualNetwork, rerr error) {
+ pager := client.VirtualNetworksClient.NewListPager(resourceGroupName, nil)
+ for pager.More() {
+ nextResult, err := pager.NextPage(ctx)
+ if err != nil {
+ return nil, err
+ }
+ result = append(result, nextResult.Value...)
+ }
+ return result, nil
+}
From a07a2c22548378378dcc050b56ed3a38dc9fd9b1 Mon Sep 17 00:00:00 2001
From: umagnus
Date: Tue, 18 Jun 2024 07:32:06 +0000
Subject: [PATCH 2/5] fix
---
go.mod | 93 +-
go.sum | 154 +-
log.txt | 1737 +++++
.../Microsoft/go-winio/.gitattributes | 1 +
.../github.com/Microsoft/go-winio/.gitignore | 9 +
.../Microsoft/go-winio/.golangci.yml | 144 +
.../github.com/Microsoft/go-winio/README.md | 85 +-
.../github.com/Microsoft/go-winio/SECURITY.md | 41 +
.../github.com/Microsoft/go-winio/backup.go | 48 +-
vendor/github.com/Microsoft/go-winio/doc.go | 22 +
vendor/github.com/Microsoft/go-winio/ea.go | 8 +-
vendor/github.com/Microsoft/go-winio/file.go | 70 +-
.../github.com/Microsoft/go-winio/fileinfo.go | 29 +-
.../github.com/Microsoft/go-winio/hvsock.go | 360 +-
.../go-winio/internal/socket/rawaddr.go | 20 +
.../go-winio/internal/socket/socket.go | 179 +
.../internal/socket/zsyscall_windows.go | 72 +
vendor/github.com/Microsoft/go-winio/pipe.go | 124 +-
.../Microsoft/go-winio/pkg/guid/guid.go | 25 +-
.../go-winio/pkg/guid/guid_nonwindows.go | 16 +
.../go-winio/pkg/guid/guid_windows.go | 13 +
.../go-winio/pkg/guid/variant_string.go | 27 +
.../Microsoft/go-winio/privilege.go | 37 +-
.../github.com/Microsoft/go-winio/reparse.go | 11 +-
vendor/github.com/Microsoft/go-winio/sd.go | 64 +-
.../github.com/Microsoft/go-winio/syscall.go | 4 +-
vendor/github.com/Microsoft/go-winio/tools.go | 5 +
.../Microsoft/go-winio/zsyscall_windows.go | 45 +-
.../coreos/go-semver/semver/semver.go | 2 +-
.../go-systemd/v22/journal/journal_unix.go | 52 +
.../go-systemd/v22/journal/journal_windows.go | 8 +
.../distribution/reference/reference.go | 4 +-
.../go.etcd.io/etcd/api/v3/version/version.go | 2 +-
.../etcd/client/pkg/v3/logutil/zap.go | 24 +-
.../etcd/client/pkg/v3/tlsutil/versions.go | 47 +
.../etcd/client/pkg/v3/transport/listener.go | 33 +-
vendor/go.etcd.io/etcd/client/v3/doc.go | 4 +-
.../client/v3/internal/endpoint/endpoint.go | 13 +-
vendor/go.etcd.io/etcd/client/v3/txn.go | 17 +-
vendor/go.etcd.io/etcd/client/v3/watch.go | 2 +-
vendor/go.uber.org/atomic/.gitignore | 3 +
vendor/go.uber.org/atomic/.travis.yml | 27 -
vendor/go.uber.org/atomic/CHANGELOG.md | 61 +-
vendor/go.uber.org/atomic/Makefile | 1 +
vendor/go.uber.org/atomic/README.md | 4 +-
vendor/go.uber.org/atomic/bool.go | 27 +-
vendor/go.uber.org/atomic/bool_ext.go | 2 +-
vendor/go.uber.org/atomic/duration.go | 27 +-
vendor/go.uber.org/atomic/duration_ext.go | 8 +-
vendor/go.uber.org/atomic/error.go | 23 +-
vendor/go.uber.org/atomic/error_ext.go | 4 +-
vendor/go.uber.org/atomic/float32.go | 77 +
vendor/go.uber.org/atomic/float32_ext.go | 76 +
vendor/go.uber.org/atomic/float64.go | 19 +-
vendor/go.uber.org/atomic/float64_ext.go | 43 +-
vendor/go.uber.org/atomic/gen.go | 1 +
vendor/go.uber.org/atomic/int32.go | 31 +-
vendor/go.uber.org/atomic/int64.go | 31 +-
vendor/go.uber.org/atomic/nocmp.go | 12 +-
vendor/go.uber.org/atomic/pointer_go118.go | 60 +
vendor/go.uber.org/atomic/pointer_go119.go | 61 +
vendor/go.uber.org/atomic/string.go | 23 +-
vendor/go.uber.org/atomic/string_ext.go | 4 +-
vendor/go.uber.org/atomic/time.go | 55 +
vendor/go.uber.org/atomic/time_ext.go | 36 +
vendor/go.uber.org/atomic/uint32.go | 31 +-
vendor/go.uber.org/atomic/uint64.go | 31 +-
vendor/go.uber.org/atomic/uintptr.go | 109 +
vendor/go.uber.org/atomic/unsafe_pointer.go | 65 +
vendor/go.uber.org/atomic/value.go | 4 +-
vendor/go.uber.org/multierr/.travis.yml | 23 -
vendor/go.uber.org/multierr/CHANGELOG.md | 35 +
vendor/go.uber.org/multierr/LICENSE.txt | 2 +-
vendor/go.uber.org/multierr/Makefile | 6 +-
vendor/go.uber.org/multierr/README.md | 30 +-
vendor/go.uber.org/multierr/error.go | 415 +-
.../go.uber.org/multierr/error_post_go120.go | 48 +
.../multierr/{go113.go => error_pre_go120.go} | 31 +-
vendor/go.uber.org/multierr/glide.yaml | 8 -
vendor/golang.org/x/crypto/hkdf/hkdf.go | 95 +
vendor/golang.org/x/mod/LICENSE | 27 +
vendor/golang.org/x/mod/PATENTS | 22 +
vendor/golang.org/x/mod/semver/semver.go | 401 ++
.../x/tools/cmd/stringer/stringer.go | 660 ++
.../x/tools/go/gcexportdata/gcexportdata.go | 186 +
.../x/tools/go/gcexportdata/importer.go | 75 +
.../tools/go/internal/packagesdriver/sizes.go | 53 +
vendor/golang.org/x/tools/go/packages/doc.go | 240 +
.../x/tools/go/packages/external.go | 101 +
.../golang.org/x/tools/go/packages/golist.go | 1107 +++
.../x/tools/go/packages/golist_overlay.go | 83 +
.../x/tools/go/packages/loadmode_string.go | 57 +
.../x/tools/go/packages/packages.go | 1347 ++++
.../golang.org/x/tools/go/packages/visit.go | 59 +
.../x/tools/go/types/objectpath/objectpath.go | 752 +++
.../x/tools/internal/event/core/event.go | 85 +
.../x/tools/internal/event/core/export.go | 70 +
.../x/tools/internal/event/core/fast.go | 77 +
.../golang.org/x/tools/internal/event/doc.go | 7 +
.../x/tools/internal/event/event.go | 127 +
.../x/tools/internal/event/keys/keys.go | 564 ++
.../x/tools/internal/event/keys/standard.go | 22 +
.../x/tools/internal/event/keys/util.go | 21 +
.../x/tools/internal/event/label/label.go | 215 +
.../x/tools/internal/event/tag/tag.go | 59 +
.../x/tools/internal/gcimporter/bimport.go | 150 +
.../x/tools/internal/gcimporter/exportdata.go | 99 +
.../x/tools/internal/gcimporter/gcimporter.go | 273 +
.../x/tools/internal/gcimporter/iexport.go | 1321 ++++
.../x/tools/internal/gcimporter/iimport.go | 1082 +++
.../internal/gcimporter/newInterface10.go | 22 +
.../internal/gcimporter/newInterface11.go | 14 +
.../internal/gcimporter/support_go117.go | 16 +
.../internal/gcimporter/support_go118.go | 37 +
.../x/tools/internal/gcimporter/unified_no.go | 10 +
.../tools/internal/gcimporter/unified_yes.go | 10 +
.../x/tools/internal/gcimporter/ureader_no.go | 19 +
.../tools/internal/gcimporter/ureader_yes.go | 728 ++
.../x/tools/internal/gocommand/invoke.go | 465 ++
.../x/tools/internal/gocommand/vendor.go | 109 +
.../x/tools/internal/gocommand/version.go | 71 +
.../internal/packagesinternal/packages.go | 22 +
.../x/tools/internal/pkgbits/codes.go | 77 +
.../x/tools/internal/pkgbits/decoder.go | 517 ++
.../x/tools/internal/pkgbits/doc.go | 32 +
.../x/tools/internal/pkgbits/encoder.go | 383 ++
.../x/tools/internal/pkgbits/flags.go | 9 +
.../x/tools/internal/pkgbits/frames_go1.go | 21 +
.../x/tools/internal/pkgbits/frames_go17.go | 28 +
.../x/tools/internal/pkgbits/reloc.go | 42 +
.../x/tools/internal/pkgbits/support.go | 17 +
.../x/tools/internal/pkgbits/sync.go | 113 +
.../internal/pkgbits/syncmarker_string.go | 89 +
.../internal/tokeninternal/tokeninternal.go | 151 +
.../x/tools/internal/typeparams/common.go | 204 +
.../x/tools/internal/typeparams/coretype.go | 122 +
.../x/tools/internal/typeparams/normalize.go | 218 +
.../x/tools/internal/typeparams/termlist.go | 163 +
.../x/tools/internal/typeparams/typeterm.go | 169 +
.../tools/internal/typesinternal/errorcode.go | 1560 +++++
.../typesinternal/errorcode_string.go | 179 +
.../x/tools/internal/typesinternal/types.go | 52 +
.../tools/internal/typesinternal/types_118.go | 19 +
.../x/tools/internal/versions/gover.go | 172 +
.../x/tools/internal/versions/types.go | 19 +
.../x/tools/internal/versions/types_go121.go | 20 +
.../x/tools/internal/versions/types_go122.go | 24 +
.../tools/internal/versions/versions_go121.go | 49 +
.../tools/internal/versions/versions_go122.go | 38 +
.../natefinch/lumberjack.v2/.travis.yml | 11 +-
.../natefinch/lumberjack.v2/chown_linux.go | 6 +-
.../natefinch/lumberjack.v2/lumberjack.go | 16 +-
.../admissionregistration/v1/generated.proto | 4 +-
.../api/admissionregistration/v1/types.go | 4 +-
.../v1/types_swagger_doc_generated.go | 4 +-
.../v1alpha1/generated.pb.go | 541 +-
.../v1alpha1/generated.proto | 92 +-
.../admissionregistration/v1alpha1/types.go | 105 +-
.../v1alpha1/types_swagger_doc_generated.go | 25 +-
.../v1alpha1/zz_generated.deepcopy.go | 33 +-
.../v1beta1/generated.pb.go | 5927 ++++++++++++++---
.../v1beta1/generated.proto | 564 +-
.../admissionregistration/v1beta1/register.go | 4 +
.../admissionregistration/v1beta1/types.go | 594 +-
.../v1beta1/types_swagger_doc_generated.go | 178 +-
.../v1beta1/zz_generated.deepcopy.go | 448 +-
.../zz_generated.prerelease-lifecycle.go | 72 +
.../v1alpha1/generated.pb.go | 148 +-
.../v1alpha1/generated.proto | 5 +
.../api/apiserverinternal/v1alpha1/types.go | 5 +
.../v1alpha1/types_swagger_doc_generated.go | 1 +
.../v1alpha1/zz_generated.deepcopy.go | 5 +
vendor/k8s.io/api/apps/v1/types.go | 3 +-
.../api/authentication/v1/generated.pb.go | 511 +-
.../api/authentication/v1/generated.proto | 20 +
.../k8s.io/api/authentication/v1/register.go | 1 +
vendor/k8s.io/api/authentication/v1/types.go | 25 +
.../v1/types_swagger_doc_generated.go | 19 +
.../v1/zz_generated.deepcopy.go | 44 +
vendor/k8s.io/api/batch/v1/generated.pb.go | 398 +-
vendor/k8s.io/api/batch/v1/generated.proto | 63 +-
vendor/k8s.io/api/batch/v1/types.go | 93 +-
.../batch/v1/types_swagger_doc_generated.go | 7 +-
.../api/batch/v1/zz_generated.deepcopy.go | 25 +
.../api/core/v1/annotation_key_constants.go | 6 +-
vendor/k8s.io/api/core/v1/generated.pb.go | 3017 +++++----
vendor/k8s.io/api/core/v1/generated.proto | 193 +-
vendor/k8s.io/api/core/v1/types.go | 235 +-
.../core/v1/types_swagger_doc_generated.go | 66 +-
.../k8s.io/api/core/v1/well_known_labels.go | 4 +
.../api/core/v1/zz_generated.deepcopy.go | 75 +-
.../k8s.io/api/discovery/v1/generated.proto | 2 +
vendor/k8s.io/api/discovery/v1/types.go | 2 +
.../v1/types_swagger_doc_generated.go | 2 +-
.../api/extensions/v1beta1/generated.pb.go | 610 +-
.../api/extensions/v1beta1/generated.proto | 17 -
vendor/k8s.io/api/extensions/v1beta1/types.go | 50 +-
.../v1beta1/types_swagger_doc_generated.go | 10 -
.../v1beta1/zz_generated.deepcopy.go | 24 -
.../api/flowcontrol/v1alpha1/generated.pb.go | 477 +-
.../api/flowcontrol/v1alpha1/generated.proto | 42 +
.../k8s.io/api/flowcontrol/v1alpha1/types.go | 45 +
.../v1alpha1/types_swagger_doc_generated.go | 11 +
.../v1alpha1/zz_generated.deepcopy.go | 31 +
.../api/flowcontrol/v1beta1/generated.pb.go | 476 +-
.../api/flowcontrol/v1beta1/generated.proto | 42 +
.../k8s.io/api/flowcontrol/v1beta1/types.go | 49 +-
.../v1beta1/types_swagger_doc_generated.go | 11 +
.../v1beta1/zz_generated.deepcopy.go | 31 +
.../api/flowcontrol/v1beta2/generated.pb.go | 477 +-
.../api/flowcontrol/v1beta2/generated.proto | 42 +
.../k8s.io/api/flowcontrol/v1beta2/types.go | 49 +-
.../v1beta2/types_swagger_doc_generated.go | 11 +
.../v1beta2/zz_generated.deepcopy.go | 31 +
.../api/flowcontrol/v1beta3/generated.pb.go | 475 +-
.../api/flowcontrol/v1beta3/generated.proto | 46 +-
.../k8s.io/api/flowcontrol/v1beta3/types.go | 53 +-
.../v1beta3/types_swagger_doc_generated.go | 13 +-
.../v1beta3/zz_generated.deepcopy.go | 31 +
.../k8s.io/api/networking/v1/generated.pb.go | 443 +-
.../k8s.io/api/networking/v1/generated.proto | 17 -
vendor/k8s.io/api/networking/v1/types.go | 50 +-
.../v1/types_swagger_doc_generated.go | 10 -
.../networking/v1/zz_generated.deepcopy.go | 24 -
vendor/k8s.io/api/rbac/v1/generated.proto | 2 +
vendor/k8s.io/api/rbac/v1/types.go | 2 +
.../rbac/v1/types_swagger_doc_generated.go | 4 +-
.../pkg/apis/apiextensions/deepcopy.go | 8 +-
.../apis/apiextensions/types_jsonschema.go | 38 +
.../pkg/apis/apiextensions/v1/deepcopy.go | 8 +-
.../pkg/apis/apiextensions/v1/generated.pb.go | 474 +-
.../pkg/apis/apiextensions/v1/generated.proto | 20 +
.../apis/apiextensions/v1/types_jsonschema.go | 38 +
.../v1/zz_generated.conversion.go | 4 +
.../apiextensions/v1/zz_generated.deepcopy.go | 9 +-
.../apiextensions/zz_generated.deepcopy.go | 9 +-
.../pkg/features/OWNERS | 4 +
.../pkg/features/kube_features.go | 48 +
.../pkg/util/httpstream}/wsstream/conn.go | 0
.../pkg/util/httpstream}/wsstream/doc.go | 2 +-
.../pkg/util/httpstream}/wsstream/stream.go | 0
.../configuration/mutating_webhook_manager.go | 76 +-
.../validating_webhook_manager.go | 79 +-
.../pkg/admission/metrics/metrics.go | 67 +-
.../pkg/admission/plugin/cel/compile.go | 273 +-
.../pkg/admission/plugin/cel/composition.go | 198 +
.../pkg/admission/plugin/cel/filter.go | 81 +-
.../pkg/admission/plugin/cel/interface.go | 14 +-
.../validatingadmissionpolicy/admission.go | 10 +-
.../caching_authorizer.go | 133 +
.../validatingadmissionpolicy/controller.go | 443 +-
.../controller_reconcile.go | 292 +-
.../validatingadmissionpolicy/interface.go | 32 +-
.../validatingadmissionpolicy/matcher.go | 17 +-
.../matching/matching.go | 59 +-
.../validatingadmissionpolicy/typechecking.go | 330 +-
.../validatingadmissionpolicy/validator.go | 23 +-
.../pkg/admission/plugin/webhook/accessors.go | 29 +-
.../plugin/webhook/generic/webhook.go | 13 +-
.../webhook/matchconditions/interface.go | 3 +-
.../plugin/webhook/matchconditions/matcher.go | 23 +-
.../plugin/webhook/mutating/dispatcher.go | 22 +-
.../webhook/predicates/namespace/matcher.go | 6 +
.../plugin/webhook/validating/dispatcher.go | 27 +-
.../pkg/apis/flowcontrol/bootstrap/default.go | 4 +
vendor/k8s.io/apiserver/pkg/audit/context.go | 76 +-
vendor/k8s.io/apiserver/pkg/audit/request.go | 29 +-
.../request/websocket/protocol.go | 2 +-
.../token/cache/cached_token_authenticator.go | 9 +-
vendor/k8s.io/apiserver/pkg/cel/composited.go | 119 -
.../apiserver/pkg/cel/environment/base.go | 119 +
.../pkg/cel/environment/environment.go | 274 +
vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go | 191 +
.../k8s.io/apiserver/pkg/cel/library/authz.go | 62 +-
.../k8s.io/apiserver/pkg/cel/library/cost.go | 52 +-
.../apiserver/pkg/cel/library/libraries.go | 35 -
.../apiserver/pkg/cel/library/quantity.go | 375 ++
.../k8s.io/apiserver/pkg/cel/library/regex.go | 4 +-
.../k8s.io/apiserver/pkg/cel/library/test.go | 79 +
vendor/k8s.io/apiserver/pkg/cel/quantity.go | 76 +
vendor/k8s.io/apiserver/pkg/cel/registry.go | 79 -
vendor/k8s.io/apiserver/pkg/cel/types.go | 160 +-
.../endpoints/discovery/aggregated/handler.go | 2 +-
.../apiserver/pkg/endpoints/filters/audit.go | 13 +-
.../pkg/endpoints/filters/authentication.go | 44 +-
.../pkg/endpoints/filters/authn_audit.go | 4 +-
.../pkg/endpoints/filters/authorization.go | 18 +-
.../pkg/endpoints/filters/metrics.go | 47 +-
.../pkg/endpoints/filters/request_deadline.go | 4 +-
.../apiserver/pkg/endpoints/groupversion.go | 5 +
.../handlers/responsewriters/writers.go | 2 +-
.../apiserver/pkg/endpoints/handlers/watch.go | 4 +-
.../apiserver/pkg/endpoints/installer.go | 5 +-
.../pkg/endpoints/metrics/metrics.go | 2 +-
.../apiserver/pkg/features/kube_features.go | 85 +-
.../apiserver/pkg/registry/generic/OWNERS | 1 -
.../pkg/registry/generic/registry/dryrun.go | 22 +-
.../pkg/registry/generic/registry/store.go | 117 +-
vendor/k8s.io/apiserver/pkg/server/config.go | 25 +-
.../pkg/server/filters/maxinflight.go | 9 +-
.../server/filters/priority-and-fairness.go | 441 +-
.../apiserver/pkg/server/genericapiserver.go | 20 +-
vendor/k8s.io/apiserver/pkg/server/handler.go | 4 +-
.../apiserver/pkg/server/options/OWNERS | 2 +
.../apiserver/pkg/server/options/admission.go | 15 +-
.../apiserver/pkg/server/options/audit.go | 10 -
.../options/deprecated_insecure_serving.go | 4 +-
.../server/options/encryptionconfig/config.go | 286 +-
.../encryptionconfig/controller/controller.go | 10 +-
.../encryptionconfig/metrics/metrics.go | 86 +
.../apiserver/pkg/server/options/etcd.go | 194 +-
.../pkg/server/options/recommended.go | 20 +-
.../apiserver/pkg/server/options/serving.go | 2 +-
.../apiserver/pkg/server/routes/metrics.go | 2 +
.../apiserver/pkg/server/routes/openapi.go | 5 +-
.../apiserver/pkg/server/secure_serving.go | 5 +-
.../pkg/server/storage/storage_factory.go | 27 +-
vendor/k8s.io/apiserver/pkg/storage/OWNERS | 4 +-
.../apiserver/pkg/storage/cacher/cacher.go | 163 +-
.../pkg/storage/cacher/caching_object.go | 4 +
.../pkg/storage/cacher/lister_watcher.go | 77 +
.../pkg/storage/cacher/watch_cache.go | 34 +-
.../pkg/storage/cacher/watch_progress.go | 121 +
.../pkg/storage/etcd3/metrics/metrics.go | 103 +-
.../apiserver/pkg/storage/etcd3/store.go | 51 +-
.../apiserver/pkg/storage/etcd3/watcher.go | 14 +-
.../apiserver/pkg/storage/interfaces.go | 15 +
.../pkg/storage/storagebackend/OWNERS | 1 -
.../storage/storagebackend/factory/etcd3.go | 63 +-
.../storage/storagebackend/factory/factory.go | 14 +-
.../pkg/storage/value/encrypt/aes/aes.go | 86 +-
.../value/encrypt/aes/aes_extended_nonce.go | 186 +
.../pkg/storage/value/encrypt/aes/cache.go | 91 +
.../value/encrypt/envelope/kmsv2/cache.go | 22 +-
.../value/encrypt/envelope/kmsv2/envelope.go | 179 +-
.../value/encrypt/envelope/kmsv2/v2/api.pb.go | 96 +-
.../value/encrypt/envelope/kmsv2/v2/api.proto | 19 +-
.../apiserver/pkg/storage/value/metrics.go | 26 +-
.../pkg/storage/value/transformer.go | 53 +-
.../apiserver/pkg/storageversion/manager.go | 9 +-
.../apiserver/pkg/storageversion/updater.go | 11 +-
.../apiserver/pkg/util/flowcontrol/OWNERS | 4 +-
.../pkg/util/flowcontrol/apf_controller.go | 221 +-
.../util/flowcontrol/apf_controller_debug.go | 118 +-
.../pkg/util/flowcontrol/debug/dump.go | 22 +-
.../flowcontrol/dropped_requests_tracker.go | 234 +
.../util/flowcontrol/fairqueuing/interface.go | 12 +-
.../fairqueuing/queueset/queueset.go | 89 +-
.../flowcontrol/fairqueuing/queueset/types.go | 85 +-
.../pkg/util/flowcontrol/metrics/metrics.go | 50 +-
.../request/list_work_estimator.go | 20 +-
.../util/flowcontrol/request/seat_seconds.go | 2 +-
.../pkg/util/flowcontrol/request/width.go | 12 +
.../pkg/util/peerproxy/metrics/metrics.go | 56 +
.../pkg/util/webhook/authentication.go | 1 +
.../apiserver/pkg/util/webhook/webhook.go | 2 +-
.../client-go/discovery/discovery_client.go | 46 +-
.../k8s.io/cloud-provider/api/retry_error.go | 46 +
vendor/k8s.io/cloud-provider/cloud.go | 12 +-
vendor/k8s.io/cloud-provider/config/types.go | 2 +-
.../cloud-provider/names/controller_names.go | 69 +
.../cloud-provider/options/kubecloudshared.go | 8 +-
.../k8s.io/cloud-provider/options/options.go | 23 +-
.../component-base/logs/api/v1/options.go | 99 +-
.../component-base/logs/api/v1/registry.go | 10 +
.../component-base/logs/api/v1/types.go | 44 +-
.../logs/api/v1/zz_generated.deepcopy.go | 18 +
vendor/k8s.io/component-base/metrics/http.go | 18 +-
.../metrics/legacyregistry/registry.go | 8 +-
.../metrics/prometheus/feature/metrics.go | 2 +-
.../metrics/prometheus/slis/metrics.go | 5 +-
.../k8s.io/component-base/metrics/registry.go | 12 +-
.../metrics/testutil/testutil.go | 61 +
.../k8s.io/component-base/version/dynamic.go | 77 +
.../k8s.io/component-base/version/version.go | 2 +-
.../node/util/sysctl/sysctl.go | 100 -
.../corev1/nodeaffinity/nodeaffinity.go | 11 +-
.../storage/volume/helpers.go | 14 -
.../controller-manager/options/generic.go | 35 +-
.../pkg/leadermigration/config/default.go | 6 +-
vendor/k8s.io/kms/apis/v1beta1/api.pb.go | 50 +-
vendor/k8s.io/kms/apis/v1beta1/api.proto | 8 +-
vendor/k8s.io/kms/apis/v1beta1/v1beta1.go | 1 +
vendor/k8s.io/kms/apis/v2/api.pb.go | 1 -
vendor/k8s.io/kms/apis/v2/api.proto | 1 -
.../kubectl/pkg/util/podutils/podutils.go | 18 +-
.../kubernetes/pkg/api/service/warnings.go | 7 +
.../kubernetes/pkg/apis/autoscaling/OWNERS | 1 -
.../k8s.io/kubernetes/pkg/apis/batch/types.go | 92 +-
.../pkg/apis/batch/zz_generated.deepcopy.go | 25 +
.../pkg/apis/core/helper/helpers.go | 57 +-
.../kubernetes/pkg/apis/core/install/OWNERS | 1 -
.../kubernetes/pkg/apis/core/pods/helpers.go | 1 +
.../k8s.io/kubernetes/pkg/apis/core/types.go | 218 +-
.../k8s.io/kubernetes/pkg/apis/core/v1/OWNERS | 1 -
.../kubernetes/pkg/apis/core/v1/conversion.go | 1 +
.../kubernetes/pkg/apis/core/v1/defaults.go | 17 +-
.../pkg/apis/core/v1/helper/helpers.go | 6 -
.../apis/core/v1/zz_generated.conversion.go | 76 +-
.../pkg/apis/core/validation/OWNERS | 1 -
.../pkg/apis/core/validation/validation.go | 457 +-
.../pkg/apis/core/zz_generated.deepcopy.go | 75 +-
.../kubernetes/pkg/apis/extensions/OWNERS | 2 +-
.../kubernetes/pkg/apis/networking/types.go | 41 -
.../apis/networking/zz_generated.deepcopy.go | 24 -
.../pkg/controller/controller_ref_manager.go | 12 +-
.../pkg/controller/controller_utils.go | 137 +-
.../deployment/util/deployment_util.go | 12 +-
.../kubernetes/pkg/controller/lookup_cache.go | 92 -
.../kubernetes/pkg/features/kube_features.go | 361 +-
.../kubernetes/pkg/kubelet/apis/config/OWNERS | 9 -
.../kubernetes/pkg/kubelet/apis/config/doc.go | 20 -
.../pkg/kubelet/apis/config/helpers.go | 31 -
.../pkg/kubelet/apis/config/register.go | 45 -
.../pkg/kubelet/apis/config/types.go | 661 --
.../apis/config/zz_generated.deepcopy.go | 479 --
.../kubernetes/pkg/kubelet/events/event.go | 1 +
.../kubernetes/pkg/kubelet/util/format/pod.go | 41 -
.../kubernetes/pkg/proxy/util/endpoints.go | 64 -
.../kubernetes/pkg/proxy/util/network.go | 39 -
.../pkg/proxy/util/nodeport_addresses.go | 127 -
.../k8s.io/kubernetes/pkg/proxy/util/utils.go | 503 --
.../k8s.io/kubernetes/pkg/util/hash/hash.go | 11 +-
.../kubernetes/pkg/util/parsers/parsers.go | 2 +-
.../k8s.io/kubernetes/pkg/util/slice/slice.go | 75 -
.../k8s.io/kubernetes/pkg/volume/plugins.go | 10 +-
.../pkg/volume/util/atomic_writer.go | 9 +-
.../fsquota/common/quota_common_linux_impl.go | 3 +-
.../pkg/volume/util/fsquota/project.go | 3 +-
.../pkg/volume/util/fsquota/quota_linux.go | 9 +-
.../kubernetes/pkg/volume/util/io_util.go | 2 +-
.../kubernetes/pkg/volume/util/resize_util.go | 52 +-
.../pkg/volume/util/storageclass.go | 2 +-
.../k8s.io/kubernetes/pkg/volume/util/util.go | 9 +-
.../volumepathhandler/volume_path_handler.go | 5 +-
.../volume_path_handler_linux.go | 3 +-
.../test/e2e/framework/.import-restrictions | 9 +-
.../e2e/framework/config/.import-restrictions | 7 +-
.../framework/deployment/.import-restrictions | 7 +-
.../test/e2e/framework/deployment/fixtures.go | 9 +-
.../e2e/framework/events/.import-restrictions | 7 +-
.../kubernetes/test/e2e/framework/expect.go | 6 +-
.../test/e2e/framework/framework.go | 26 +-
.../framework/kubectl/.import-restrictions | 7 +-
.../test/e2e/framework/kubectl/builder.go | 2 +-
.../e2e/framework/node/.import-restrictions | 7 +-
.../test/e2e/framework/node/resource.go | 4 +-
.../e2e/framework/pod/.import-restrictions | 7 +-
.../test/e2e/framework/pod/create.go | 27 +-
.../test/e2e/framework/pod/output/output.go | 4 +-
.../test/e2e/framework/pod/pod_client.go | 61 +-
.../test/e2e/framework/pod/utils.go | 20 +-
.../kubernetes/test/e2e/framework/pod/wait.go | 12 +-
.../kubernetes/test/e2e/framework/pv/pv.go | 2 +-
.../e2e/framework/ssh/.import-restrictions | 7 +-
.../test/e2e/framework/test_context.go | 18 +-
.../framework/testfiles/.import-restrictions | 7 +-
.../kubernetes/test/e2e/framework/util.go | 70 +-
.../test/e2e/framework/volume/fixtures.go | 20 +-
.../kubernetes/test/utils/deployment.go | 11 +-
.../kubernetes/test/utils/image/manifest.go | 18 +-
vendor/k8s.io/kubernetes/test/utils/paths.go | 12 +-
.../k8s.io/kubernetes/test/utils/runners.go | 49 +-
.../k8s.io/mount-utils/mount_helper_unix.go | 55 +-
vendor/k8s.io/mount-utils/mount_linux.go | 9 +-
vendor/k8s.io/mount-utils/mount_windows.go | 22 +-
vendor/k8s.io/mount-utils/resizefs_linux.go | 3 +-
vendor/modules.txt | 154 +-
468 files changed, 41421 insertions(+), 10527 deletions(-)
create mode 100644 log.txt
create mode 100644 vendor/github.com/Microsoft/go-winio/.gitattributes
create mode 100644 vendor/github.com/Microsoft/go-winio/.golangci.yml
create mode 100644 vendor/github.com/Microsoft/go-winio/SECURITY.md
create mode 100644 vendor/github.com/Microsoft/go-winio/doc.go
create mode 100644 vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
create mode 100644 vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
create mode 100644 vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
create mode 100644 vendor/github.com/Microsoft/go-winio/tools.go
create mode 100644 vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
delete mode 100644 vendor/go.uber.org/atomic/.travis.yml
create mode 100644 vendor/go.uber.org/atomic/float32.go
create mode 100644 vendor/go.uber.org/atomic/float32_ext.go
create mode 100644 vendor/go.uber.org/atomic/pointer_go118.go
create mode 100644 vendor/go.uber.org/atomic/pointer_go119.go
create mode 100644 vendor/go.uber.org/atomic/time.go
create mode 100644 vendor/go.uber.org/atomic/time_ext.go
create mode 100644 vendor/go.uber.org/atomic/uintptr.go
create mode 100644 vendor/go.uber.org/atomic/unsafe_pointer.go
delete mode 100644 vendor/go.uber.org/multierr/.travis.yml
create mode 100644 vendor/go.uber.org/multierr/error_post_go120.go
rename vendor/go.uber.org/multierr/{go113.go => error_pre_go120.go} (66%)
delete mode 100644 vendor/go.uber.org/multierr/glide.yaml
create mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go
create mode 100644 vendor/golang.org/x/mod/LICENSE
create mode 100644 vendor/golang.org/x/mod/PATENTS
create mode 100644 vendor/golang.org/x/mod/semver/semver.go
create mode 100644 vendor/golang.org/x/tools/cmd/stringer/stringer.go
create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go
create mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
create mode 100644 vendor/golang.org/x/tools/go/packages/doc.go
create mode 100644 vendor/golang.org/x/tools/go/packages/external.go
create mode 100644 vendor/golang.org/x/tools/go/packages/golist.go
create mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go
create mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go
create mode 100644 vendor/golang.org/x/tools/go/packages/packages.go
create mode 100644 vendor/golang.org/x/tools/go/packages/visit.go
create mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
create mode 100644 vendor/golang.org/x/tools/internal/event/core/event.go
create mode 100644 vendor/golang.org/x/tools/internal/event/core/export.go
create mode 100644 vendor/golang.org/x/tools/internal/event/core/fast.go
create mode 100644 vendor/golang.org/x/tools/internal/event/doc.go
create mode 100644 vendor/golang.org/x/tools/internal/event/event.go
create mode 100644 vendor/golang.org/x/tools/internal/event/keys/keys.go
create mode 100644 vendor/golang.org/x/tools/internal/event/keys/standard.go
create mode 100644 vendor/golang.org/x/tools/internal/event/keys/util.go
create mode 100644 vendor/golang.org/x/tools/internal/event/label/label.go
create mode 100644 vendor/golang.org/x/tools/internal/event/tag/tag.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/bimport.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iexport.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go117.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go
create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go
create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go
create mode 100644 vendor/golang.org/x/tools/internal/gocommand/version.go
create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/codes.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/decoder.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/doc.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/encoder.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/flags.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/reloc.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/support.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/sync.go
create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
create mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
create mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go
create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go
create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go
create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go
create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go
create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types.go
create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types_118.go
create mode 100644 vendor/golang.org/x/tools/internal/versions/gover.go
create mode 100644 vendor/golang.org/x/tools/internal/versions/types.go
create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go
create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go
create mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go121.go
create mode 100644 vendor/golang.org/x/tools/internal/versions/versions_go122.go
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS
create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
rename vendor/k8s.io/{apiserver/pkg/util => apimachinery/pkg/util/httpstream}/wsstream/conn.go (100%)
rename vendor/k8s.io/{apiserver/pkg/util => apimachinery/pkg/util/httpstream}/wsstream/doc.go (91%)
rename vendor/k8s.io/{apiserver/pkg/util => apimachinery/pkg/util/httpstream}/wsstream/stream.go (100%)
create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go
create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/caching_authorizer.go
delete mode 100644 vendor/k8s.io/apiserver/pkg/cel/composited.go
create mode 100644 vendor/k8s.io/apiserver/pkg/cel/environment/base.go
create mode 100644 vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
create mode 100644 vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go
delete mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/quantity.go
create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/test.go
create mode 100644 vendor/k8s.io/apiserver/pkg/cel/quantity.go
delete mode 100644 vendor/k8s.io/apiserver/pkg/cel/registry.go
create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go
create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go
create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go
create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/aes_extended_nonce.go
create mode 100644 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/aes/cache.go
create mode 100644 vendor/k8s.io/apiserver/pkg/util/flowcontrol/dropped_requests_tracker.go
create mode 100644 vendor/k8s.io/apiserver/pkg/util/peerproxy/metrics/metrics.go
create mode 100644 vendor/k8s.io/cloud-provider/api/retry_error.go
create mode 100644 vendor/k8s.io/cloud-provider/names/controller_names.go
create mode 100644 vendor/k8s.io/component-base/version/dynamic.go
delete mode 100644 vendor/k8s.io/component-helpers/node/util/sysctl/sysctl.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/OWNERS
delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/doc.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/helpers.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/register.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/types.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/apis/config/zz_generated.deepcopy.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/util/format/pod.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/proxy/util/network.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/proxy/util/nodeport_addresses.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go
delete mode 100644 vendor/k8s.io/kubernetes/pkg/util/slice/slice.go
diff --git a/go.mod b/go.mod
index 353986e1d3..da0b9f6b25 100644
--- a/go.mod
+++ b/go.mod
@@ -27,13 +27,13 @@ require (
golang.org/x/net v0.24.0
google.golang.org/grpc v1.59.0
google.golang.org/protobuf v1.33.0
- k8s.io/api v0.28.0
+ k8s.io/api v0.28.2
k8s.io/apimachinery v0.28.2
- k8s.io/cloud-provider v0.27.13
- k8s.io/component-base v0.28.0
+ k8s.io/cloud-provider v0.28.1
+ k8s.io/component-base v0.28.1
k8s.io/klog/v2 v2.100.1
- k8s.io/kubernetes v1.27.13
- k8s.io/mount-utils v0.27.6
+ k8s.io/kubernetes v1.28.2
+ k8s.io/mount-utils v0.28.2
k8s.io/utils v0.0.0-20230505201702-9f6742963106
sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230629023444-76504759ed59
sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231012171618-1890d8703623
@@ -43,7 +43,7 @@ require (
require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect
golang.org/x/sys v0.19.0
- k8s.io/client-go v0.28.0
+ k8s.io/client-go v0.28.2
k8s.io/pod-security-admission v0.27.13
)
@@ -58,7 +58,7 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect
- github.com/Microsoft/go-winio v0.4.17 // indirect
+ github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
@@ -66,10 +66,10 @@ require (
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
- github.com/coreos/go-semver v0.3.0 // indirect
- github.com/coreos/go-systemd/v22 v22.4.0 // indirect
+ github.com/coreos/go-semver v0.3.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/docker/distribution v2.8.1+incompatible // indirect
+ github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
@@ -114,9 +114,9 @@ require (
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
- go.etcd.io/etcd/api/v3 v3.5.7 // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect
- go.etcd.io/etcd/client/v3 v3.5.7 // indirect
+ go.etcd.io/etcd/api/v3 v3.5.9 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
+ go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel v1.21.0 // indirect
@@ -126,11 +126,12 @@ require (
go.opentelemetry.io/otel/sdk v1.20.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.6.0 // indirect
+ go.uber.org/atomic v1.10.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
+ golang.org/x/mod v0.14.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/term v0.19.0 // indirect
@@ -142,14 +143,14 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.0.0 // indirect
- k8s.io/apiserver v0.27.13 // indirect
- k8s.io/component-helpers v0.27.13 // indirect
- k8s.io/controller-manager v0.27.13 // indirect
- k8s.io/kms v0.27.13 // indirect
+ k8s.io/apiserver v0.28.1 // indirect
+ k8s.io/component-helpers v0.28.1 // indirect
+ k8s.io/controller-manager v0.28.1 // indirect
+ k8s.io/kms v0.28.1 // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/kubectl v0.0.0 // indirect
k8s.io/kubelet v0.27.13 // indirect
@@ -161,30 +162,30 @@ require (
replace (
github.com/niemeyer/pretty => github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e
go.etcd.io/etcd => go.etcd.io/etcd v0.0.0-20200410171415-59f5fb25a533
- k8s.io/api => k8s.io/api v0.27.13
- k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.13
- k8s.io/apimachinery => k8s.io/apimachinery v0.28.2
- k8s.io/apiserver => k8s.io/apiserver v0.27.13
- k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.13
- k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.13
- k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.27.13
- k8s.io/code-generator => k8s.io/code-generator v0.27.13
- k8s.io/component-base => k8s.io/component-base v0.27.13
- k8s.io/component-helpers => k8s.io/component-helpers v0.27.13
- k8s.io/controller-manager => k8s.io/controller-manager v0.27.13
- k8s.io/cri-api => k8s.io/cri-api v0.27.13
- k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.13
- k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.13
- k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.13
- k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.13
- k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.13
- k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.13
- k8s.io/kubectl => k8s.io/kubectl v0.27.13
- k8s.io/kubelet => k8s.io/kubelet v0.27.13
- k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.13
- k8s.io/metrics => k8s.io/metrics v0.27.13
- k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.13
- k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.13
- k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.13
- k8s.io/sample-controller => k8s.io/sample-controller v0.27.13
+ k8s.io/api => k8s.io/api v0.28.1
+ k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.1
+ k8s.io/apimachinery => k8s.io/apimachinery v0.28.1
+ k8s.io/apiserver => k8s.io/apiserver v0.28.1
+ k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.1
+ k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.1
+ k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.1
+ k8s.io/code-generator => k8s.io/code-generator v0.28.1
+ k8s.io/component-base => k8s.io/component-base v0.28.1
+ k8s.io/component-helpers => k8s.io/component-helpers v0.28.1
+ k8s.io/controller-manager => k8s.io/controller-manager v0.28.1
+ k8s.io/cri-api => k8s.io/cri-api v0.28.1
+ k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.1
+ k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.1
+ k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.1
+ k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.1
+ k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.1
+ k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.1
+ k8s.io/kubectl => k8s.io/kubectl v0.28.1
+ k8s.io/kubelet => k8s.io/kubelet v0.28.1
+ k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.1
+ k8s.io/metrics => k8s.io/metrics v0.28.1
+ k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.1
+ k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.1
+ k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.1
+ k8s.io/sample-controller => k8s.io/sample-controller v0.28.1
)
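
A minimal, illustrative sketch of the track2 credential flow that the azidentity/azcore requirements above are pulled in for. This is an assumption-level example, not code from this patch: the ARM scope string and variable names are placeholders, and real callers would normally hand the credential to an ARM client constructor rather than request tokens directly.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential chains environment, managed identity and Azure CLI
	// authentication, replacing track1 go-autorest authorizers.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Assumed ARM scope, shown only to demonstrate that the credential satisfies
	// the azcore TokenCredential interface used by track2 clients.
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tok.ExpiresOn)
}
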
diff --git a/go.sum b/go.sum
index 60eb513fa4..73abf2b32c 100644
--- a/go.sum
+++ b/go.sum
@@ -67,12 +67,11 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -101,10 +100,10 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI=
github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU=
-github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -112,10 +111,10 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
-github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
+github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
@@ -126,7 +125,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
@@ -200,7 +198,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -268,7 +265,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -287,7 +283,6 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
@@ -363,14 +358,11 @@ github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
@@ -403,17 +395,17 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
-go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
-go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
-go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
-go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU=
-go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
-go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
-go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0=
-go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc=
-go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk=
+go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
+go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
+go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
+go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
+go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
+go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo=
+go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
+go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
+go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ=
+go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI=
+go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -435,12 +427,14 @@ go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -454,10 +448,6 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -492,7 +482,7 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -521,7 +511,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
@@ -529,10 +518,6 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -552,7 +537,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -568,7 +552,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -577,7 +560,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -599,10 +581,6 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -616,10 +594,6 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
-golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -635,7 +609,6 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -676,7 +649,6 @@ golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
-golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -706,7 +678,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U=
google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4=
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k=
@@ -731,7 +702,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
@@ -750,8 +720,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -760,7 +730,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -769,53 +738,50 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.27.13 h1:d49LYs1dh+JMMDNYQSu8FhEzCjc2TNpYvDWoSGAKs80=
-k8s.io/api v0.27.13/go.mod h1:W3lYMPs34i0XQA+cmKfejve+HwbRZjy67fL05RyJUTo=
-k8s.io/apiextensions-apiserver v0.27.13 h1:it32SCkrjzhimZasL++nsshG66m2O570y56R+xj1/WE=
-k8s.io/apiextensions-apiserver v0.27.13/go.mod h1:LkAz0+pjqr/92kPigX/B2sjsPhGCuG+hi8GyyjUNNsE=
-k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ=
-k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU=
-k8s.io/apiserver v0.27.13 h1:Yf69zVdbuQVIMpz7N4dtntWsUklKpcFXGAdVh7vKOH4=
-k8s.io/apiserver v0.27.13/go.mod h1:XHth2MKAUdcLvdhPOwvDPbSyOrMev2vRqE05oUEC5Hk=
+k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108=
+k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg=
+k8s.io/apiextensions-apiserver v0.28.1 h1:l2ThkBRjrWpw4f24uq0Da2HaEgqJZ7pcgiEUTKSmQZw=
+k8s.io/apiextensions-apiserver v0.28.1/go.mod h1:sVvrI+P4vxh2YBBcm8n2ThjNyzU4BQGilCQ/JAY5kGs=
+k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY=
+k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw=
+k8s.io/apiserver v0.28.1 h1:dw2/NKauDZCnOUAzIo2hFhtBRUo6gQK832NV8kuDbGM=
+k8s.io/apiserver v0.28.1/go.mod h1:d8aizlSRB6yRgJ6PKfDkdwCy2DXt/d1FDR6iJN9kY1w=
k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
-k8s.io/client-go v0.28.0 h1:ebcPRDZsCjpj62+cMk1eGNX1QkMdRmQ6lmz5BLoFWeM=
-k8s.io/client-go v0.28.0/go.mod h1:0Asy9Xt3U98RypWJmU1ZrRAGKhP6NqDPmptlAzK2kMc=
-k8s.io/cloud-provider v0.27.13 h1:iOLPffa1PjCE9SpHIVrMfRQv8bBER+LGjJ3nbl69xkA=
-k8s.io/cloud-provider v0.27.13/go.mod h1:Z6hm5I+K/1yQK4myP/PdP0NorxfM9p8iPEhyGvznw0I=
-k8s.io/code-generator v0.27.13/go.mod h1:NmuMGweDQC7Ewx+c8zgbtVPLsy5r5Rs/+nQ7kuBwNbI=
-k8s.io/component-base v0.27.13 h1:JuDLZqD8L+Wu9nUNv9msjQjkh172Ag6crphzASEvlKo=
-k8s.io/component-base v0.27.13/go.mod h1:QKur/xRE4R25PhScEe3lAhrVSwEuZPGlFPyEDOaWCgU=
-k8s.io/component-helpers v0.27.13 h1:o2pj2ycWJw8f+Gjh66GrvSk6Oz1jsd9GNr/IqaclSyw=
-k8s.io/component-helpers v0.27.13/go.mod h1:fbi9kk3Y2lypTKOSiIpTmYGLj1CsVO7BmSIa0dq28ao=
-k8s.io/controller-manager v0.27.13 h1:/vw2Bd4glt10N+cRejsVLDkJNCS9zqecX9Imjwv7JZE=
-k8s.io/controller-manager v0.27.13/go.mod h1:sM53BuXjOHuL5CnKmynSDfmIGy/kPzA3o450uSziIn0=
-k8s.io/csi-translation-lib v0.27.13 h1:Fu1f/gsWGEqlwW7kYaDH65v9RU7T3+fSqQNk9WzSFgQ=
+k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY=
+k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY=
+k8s.io/cloud-provider v0.28.1 h1:bR7lIRYBHqxfsOkUsY2hJ7V7vmStxb0wjJJdrID8+7I=
+k8s.io/cloud-provider v0.28.1/go.mod h1:7jxsc3c15go606KLXnUq8Cy4nX1R1dxFRgn/czIJp/Q=
+k8s.io/code-generator v0.28.1/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A=
+k8s.io/component-base v0.28.1 h1:LA4AujMlK2mr0tZbQDZkjWbdhTV5bRyEyAFe0TJxlWg=
+k8s.io/component-base v0.28.1/go.mod h1:jI11OyhbX21Qtbav7JkhehyBsIRfnO8oEgoAR12ArIU=
+k8s.io/component-helpers v0.28.1 h1:ts/vykhyUmPLhUl/hdLdf+a4BWA0giQ3f25HAIhl+RI=
+k8s.io/component-helpers v0.28.1/go.mod h1:rHFPj33uXNbgppg+ilmjJ4oR73prZQNRRmg+utVOAb0=
+k8s.io/controller-manager v0.28.1 h1:+md/3DAsdLVoMe3AewhyTxljnPLE/gyshTDZ8sX4Rf0=
+k8s.io/controller-manager v0.28.1/go.mod h1:yZ8aOBpMYOBTAI/Jd0qpaUzZUlQigmtRcdYg2VgWKiU=
+k8s.io/csi-translation-lib v0.28.1 h1:6EdpqKbwgJEcLxRzcGR1GnbyJrcTcUMhHTYfMwFT3LA=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kms v0.27.13 h1:mm0njb9VaDh2TNanqndUWGpe4c2elQbkPofoGPOalvM=
-k8s.io/kms v0.27.13/go.mod h1:g3rvv+YDJ5xVEanhlnliJw4a9vZ/SL/AVw0/yQNwbEA=
-k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
+k8s.io/kms v0.28.1 h1:QLNTIc0k7Yebkt9yobj9Y9qBoRCMB4dq+pFCxVXVBnY=
+k8s.io/kms v0.28.1/go.mod h1:I2TwA8oerDRInHWWBOqSUzv1EJDC1+55FQKYkxaPxh0=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
-k8s.io/kubectl v0.27.13 h1:RuygTTIy/FFotjI5kBA/DRamU9kSbCjUVlqd8Ni/U94=
-k8s.io/kubectl v0.27.13/go.mod h1:wAoY7J5QWMX6xzeQ/rw9kZpadB4zUVePu1WzGv1GBsw=
-k8s.io/kubelet v0.27.13 h1:tQ4bG3gfValJZDkVBCrnWrLbBRRlE3rA8gtC9LgTAag=
-k8s.io/kubelet v0.27.13/go.mod h1:5QjeSJXimzRA+avdgsi3mSNyo09JH/6fmgGe2aOPRoA=
-k8s.io/kubernetes v1.27.13 h1:5NBz3aNy5Jwcgi+w2+rP4x5B0Wa21NXUJxDJwdUtxlY=
-k8s.io/kubernetes v1.27.13/go.mod h1:T4toI2XSWG5FJoq/H8q9eFYPymAxe/k4UnaC00uPnMs=
-k8s.io/mount-utils v0.27.6 h1:Y8TH5zcHLgHFHbwSZWMrbpDyad46+k2WnIgCfmwzqpk=
-k8s.io/mount-utils v0.27.6/go.mod h1:vmcjYdi2Vg1VTWY7KkhvwJVY6WDHxb/QQhiQKkR8iNs=
-k8s.io/pod-security-admission v0.27.13 h1:B3sSlxhQZiPuEj6dD8Mc1oyjWR3/nUVZ9qXTvD1BOj0=
-k8s.io/pod-security-admission v0.27.13/go.mod h1:d0ms/UYCmD0O28MA0MngozhKouM5xsKXFav4VGOpLr8=
+k8s.io/kubectl v0.28.1 h1:jAq4yKEqQL+fwkWcEsUWxhJ7uIRcOYQraJxx4SyAMTY=
+k8s.io/kubectl v0.28.1/go.mod h1:a0nk/lMMeKBulp0lMTJAKbkjZg1ykqfLfz/d6dnv1ak=
+k8s.io/kubelet v0.28.1 h1:QRfx+jrzNgkLnMSw/nxGkAN7cjHPO446MDbjPITxLkk=
+k8s.io/kubelet v0.28.1/go.mod h1:xYBbbJ0e2Rtb/hv+QFie448lFF81J990ImIptce2AHk=
+k8s.io/kubernetes v1.28.2 h1:GhcnYeNTukeaC0dD5BC+UWBvzQsFEpWj7XBVMQptfYc=
+k8s.io/kubernetes v1.28.2/go.mod h1:FmB1Mlp9ua0ezuwQCTGs/y6wj/fVisN2sVxhzjj0WDk=
+k8s.io/mount-utils v0.28.2 h1:sIdMH7fRhcU48V1oYJ9cLmLm/TG+2jLhhe8eS3I+FWg=
+k8s.io/mount-utils v0.28.2/go.mod h1:AyP8LmZSLgpGdFQr+vzHTerlPiGvXUdP99n98Er47jw=
+k8s.io/pod-security-admission v0.28.1 h1:d3jvo/+C6yDR1wnlX9ot1WvLyJ5R4uachJyxhdn9cW8=
+k8s.io/pod-security-admission v0.28.1/go.mod h1:Qm1rSy3l96m6QXGNU/8u+cmdpNdmAeA3OYDinrXhi6U=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU=
k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
diff --git a/log.txt b/log.txt
new file mode 100644
index 0000000000..6cedc512d3
--- /dev/null
+++ b/log.txt
@@ -0,0 +1,1737 @@
+? sigs.k8s.io/azuredisk-csi-driver/pkg/azureconstants [no test files]
+? sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk/mockcorev1 [no test files]
+=== RUN TestRescanAllVolumes
+--- PASS: TestRescanAllVolumes (0.00s)
+=== RUN TestFindDiskByLun
+W0618 07:26:59.709046 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/azure/scsi1/ failed, error: read /dev/disk/azure/scsi1/ error: bad dir
+W0618 07:26:59.709117 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/by-id/ failed, error: read /dev/disk/by-id/ error: bad dir
+--- PASS: TestFindDiskByLun (0.00s)
+=== RUN TestStrFirstLetterToUpper
+--- PASS: TestStrFirstLetterToUpper (0.00s)
+=== RUN TestNewDriverV1
+--- PASS: TestNewDriverV1 (0.00s)
+=== RUN TestCheckDiskCapacity
+I0618 07:26:59.716403 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:26:59.716419 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:26:59.716425 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:26:59.716430 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:26:59.716435 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:26:59.716440 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:26:59.716445 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:26:59.716450 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:26:59.716456 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestCheckDiskCapacity (0.01s)
+=== RUN TestRun
+=== RUN TestRun/Successful_run
+I0618 07:26:59.723128 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:26:59.723143 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:26:59.723155 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:26:59.723161 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:26:59.723167 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:26:59.723173 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:26:59.723178 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:26:59.723182 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:26:59.723189 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:26:59.723411 1076117 azuredisk.go:201]
+DRIVER INFORMATION:
+-------------------
+Build Date: N/A
+Compiler: gc
+Driver Name: disk.csi.azure.com
+Driver Version: N/A
+Git Commit: N/A
+Go Version: go1.21.4
+Platform: linux/amd64
+Topology Key: topology.disk.csi.azure.com/zone
+
+Streaming logs below:
+W0618 07:26:59.723434 1076117 azure_disk_utils.go:262] get kubeconfig() failed with error: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
+I0618 07:26:59.740559 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:26:59.740606 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:26:59.740614 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:26:59.740618 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:26:59.740623 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:26:59.740629 1076117 driver.go:81] Enabling controller service capability: SINGLE_NODE_MULTI_WRITER
+I0618 07:26:59.740636 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:26:59.740643 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_READER_ONLY
+I0618 07:26:59.740649 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_SINGLE_WRITER
+I0618 07:26:59.740655 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_MULTI_WRITER
+I0618 07:26:59.741222 1076117 server.go:117] Listening for connections on address: &net.TCPAddr{IP:net.IP{0x7f, 0x0, 0x0, 0x1}, Port:38249, Zone:""}
+=== RUN TestRun/Successful_run_without_cloud_config
+I0618 07:26:59.758308 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:26:59.758328 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:26:59.758334 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:26:59.758340 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:26:59.758346 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:26:59.758351 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:26:59.758357 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:26:59.758366 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:26:59.758373 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:26:59.758615 1076117 azuredisk.go:201]
+DRIVER INFORMATION:
+-------------------
+Build Date: N/A
+Compiler: gc
+Driver Name: disk.csi.azure.com
+Driver Version: N/A
+Git Commit: N/A
+Go Version: go1.21.4
+Platform: linux/amd64
+Topology Key: topology.disk.csi.azure.com/zone
+
+Streaming logs below:
+W0618 07:26:59.758637 1076117 azure_disk_utils.go:262] get kubeconfig() failed with error: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
+I0618 07:26:59.778645 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:26:59.778666 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:26:59.778673 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:26:59.778678 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:26:59.778683 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:26:59.778689 1076117 driver.go:81] Enabling controller service capability: SINGLE_NODE_MULTI_WRITER
+I0618 07:26:59.778695 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:26:59.778701 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_READER_ONLY
+I0618 07:26:59.778706 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_SINGLE_WRITER
+I0618 07:26:59.778711 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_MULTI_WRITER
+I0618 07:26:59.778866 1076117 server.go:117] Listening for connections on address: &net.TCPAddr{IP:net.IP{0x7f, 0x0, 0x0, 0x1}, Port:41979, Zone:""}
+=== RUN TestRun/Successful_run_with_node_ID_missing
+I0618 07:26:59.789504 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:26:59.789526 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:26:59.789533 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:26:59.789538 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:26:59.789543 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:26:59.789548 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:26:59.789553 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:26:59.789557 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:26:59.789563 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:26:59.789775 1076117 azuredisk.go:201]
+DRIVER INFORMATION:
+-------------------
+Build Date: N/A
+Compiler: gc
+Driver Name: disk.csi.azure.com
+Driver Version: N/A
+Git Commit: N/A
+Go Version: go1.21.4
+Platform: linux/amd64
+Topology Key: topology.disk.csi.azure.com/zone
+
+Streaming logs below:
+W0618 07:26:59.789792 1076117 azure_disk_utils.go:262] get kubeconfig() failed with error: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
+I0618 07:26:59.797555 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:26:59.797579 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:26:59.797586 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:26:59.797591 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:26:59.797597 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:26:59.797603 1076117 driver.go:81] Enabling controller service capability: SINGLE_NODE_MULTI_WRITER
+I0618 07:26:59.797609 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:26:59.797614 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_READER_ONLY
+I0618 07:26:59.797620 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_SINGLE_WRITER
+I0618 07:26:59.797625 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_MULTI_WRITER
+I0618 07:26:59.797783 1076117 server.go:117] Listening for connections on address: &net.TCPAddr{IP:net.IP{0x7f, 0x0, 0x0, 0x1}, Port:34705, Zone:""}
+=== RUN TestRun/Successful_run_with_vmss_VMType
+I0618 07:26:59.798232 1076117 azuredisk.go:201]
+DRIVER INFORMATION:
+-------------------
+Build Date: N/A
+Compiler: gc
+Driver Name: disk.csi.azure.com
+Driver Version: N/A
+Git Commit: N/A
+Go Version: go1.21.4
+Platform: linux/amd64
+Topology Key: topology.disk.csi.azure.com/zone
+
+Streaming logs below:
+W0618 07:26:59.798259 1076117 azure_disk_utils.go:262] get kubeconfig() failed with error: unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined
+E0618 07:27:00.178940 1076117 util.go:138] Send.sendRequest: error in parsing response body string "": unexpected end of JSON input, Skip retrying regional host
+E0618 07:27:00.179056 1076117 azure_standard.go:644] as.GetInstanceTypeByNodeName() failed: as.getVirtualMachine() err=Retriable: false, RetryAfter: 0s, HTTPStatusCode: 400, RawError: azure.BearerAuthorizer#WithAuthorization: Failed to refresh the Token for request to https://management.azure.com/subscriptions/12345/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/?%24expand=instanceView&api-version=2022-03-01: StatusCode=400 -- Original Error: adal: Refresh request failed. Status Code = '400'. Response body: {"error":"invalid_request","error_description":"AADSTS90002: Tenant '1234' not found. Check to make sure you have the correct tenant ID and are signing into the correct cloud. Check with your subscription administrator, this may happen if there are no active subscriptions for the tenant. Trace ID: a16d0652-86c8-4cb0-89b9-352e22f20000 Correlation ID: 6f9193df-30ef-4e38-9029-36ce6c6e295f Timestamp: 2024-06-18 07:27:00Z","error_codes":[90002],"timestamp":"2024-06-18 07:27:00Z","trace_id":"a16d0652-86c8-4cb0-89b9-352e22f20000","correlation_id":"6f9193df-30ef-4e38-9029-36ce6c6e295f","error_uri":"https://login.microsoftonline.com/error?code=90002"} Endpoint https://login.microsoftonline.com/1234/oauth2/token
+W0618 07:27:00.179084 1076117 azuredisk.go:255] Failed to get node info. Error: NewNodeInfo: Failed to get instance type from Azure cloud provider, nodeName: , error: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 400, RawError: azure.BearerAuthorizer#WithAuthorization: Failed to refresh the Token for request to https://management.azure.com/subscriptions/12345/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/?%24expand=instanceView&api-version=2022-03-01: StatusCode=400 -- Original Error: adal: Refresh request failed. Status Code = '400'. Response body: {"error":"invalid_request","error_description":"AADSTS90002: Tenant '1234' not found. Check to make sure you have the correct tenant ID and are signing into the correct cloud. Check with your subscription administrator, this may happen if there are no active subscriptions for the tenant. Trace ID: a16d0652-86c8-4cb0-89b9-352e22f20000 Correlation ID: 6f9193df-30ef-4e38-9029-36ce6c6e295f Timestamp: 2024-06-18 07:27:00Z","error_codes":[90002],"timestamp":"2024-06-18 07:27:00Z","trace_id":"a16d0652-86c8-4cb0-89b9-352e22f20000","correlation_id":"6f9193df-30ef-4e38-9029-36ce6c6e295f","error_uri":"https://login.microsoftonline.com/error?code=90002"} Endpoint https://login.microsoftonline.com/1234/oauth2/token
+I0618 07:27:00.184535 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.184559 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.184566 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.184571 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.184576 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.184582 1076117 driver.go:81] Enabling controller service capability: SINGLE_NODE_MULTI_WRITER
+I0618 07:27:00.184587 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.184592 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.184598 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.184605 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:27:00.184614 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_READER_ONLY
+I0618 07:27:00.184624 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_SINGLE_WRITER
+I0618 07:27:00.184632 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_MULTI_WRITER
+I0618 07:27:00.184774 1076117 server.go:117] Listening for connections on address: &net.TCPAddr{IP:net.IP{0x7f, 0x0, 0x0, 0x1}, Port:33573, Zone:""}
+--- PASS: TestRun (0.47s)
+ --- PASS: TestRun/Successful_run (0.02s)
+ --- PASS: TestRun/Successful_run_without_cloud_config (0.04s)
+ --- PASS: TestRun/Successful_run_with_node_ID_missing (0.02s)
+ --- PASS: TestRun/Successful_run_with_vmss_VMType (0.39s)
+=== RUN TestDriver_checkDiskExists
+I0618 07:27:00.189605 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.189630 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.189636 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.189641 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.189646 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.189652 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.189657 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.189663 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.189669 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestDriver_checkDiskExists (0.00s)
+=== RUN TestGetNodeInfoFromLabels
+--- PASS: TestGetNodeInfoFromLabels (0.00s)
+=== RUN TestGetDefaultDiskIOPSReadWrite
+--- PASS: TestGetDefaultDiskIOPSReadWrite (0.00s)
+=== RUN TestGetDefaultDiskMBPSReadWrite
+--- PASS: TestGetDefaultDiskMBPSReadWrite (0.00s)
+=== RUN TestWaitForSnapshot
+=== RUN TestWaitForSnapshot/snapshotID_not_valid
+I0618 07:27:00.205831 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.205853 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.205864 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.205869 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.205873 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.205877 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.205888 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.205891 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.205895 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestWaitForSnapshot/timeout_for_waiting_snapshot_copy_cross_region
+I0618 07:27:00.218547 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.218570 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.218577 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.218582 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.218588 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.218594 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.218600 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.218606 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.218612 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestWaitForSnapshot/succeed_for_waiting_snapshot_copy_cross_region
+I0618 07:27:00.242024 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.242043 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.242048 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.242053 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.242057 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.242062 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.242070 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.242075 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.242082 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestWaitForSnapshot (0.05s)
+ --- PASS: TestWaitForSnapshot/snapshotID_not_valid (0.02s)
+ --- PASS: TestWaitForSnapshot/timeout_for_waiting_snapshot_copy_cross_region (0.03s)
+ --- PASS: TestWaitForSnapshot/succeed_for_waiting_snapshot_copy_cross_region (0.01s)
+=== RUN TestGetVMSSInstanceName
+--- PASS: TestGetVMSSInstanceName (0.00s)
+=== RUN TestCheckDiskCapacity_V1
+I0618 07:27:00.246320 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.246336 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.246341 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.246346 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.246351 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.246356 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.246361 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.246365 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.246371 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+W0618 07:27:00.246419 1076117 azuredisk.go:340] skip checkDiskCapacity(unit-test, unit-test) since it's still in throttling
+--- PASS: TestCheckDiskCapacity_V1 (0.00s)
+=== RUN TestDriver_checkDiskExists_V1
+I0618 07:27:00.268582 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.268602 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.268609 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.268618 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.268623 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.268628 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.268632 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.268637 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.268642 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+W0618 07:27:00.268763 1076117 azuredisk.go:321] skip checkDiskExists(testurl/subscriptions/12/resourceGroups/23/providers/Microsoft.Compute/disks/name) since it's still in throttling
+--- PASS: TestDriver_checkDiskExists_V1 (0.02s)
+=== RUN TestCreateVolume
+=== RUN TestCreateVolume/_invalid_
+I0618 07:27:00.287387 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.287407 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.287413 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.287418 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.287423 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.287429 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.287434 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.287442 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.287457 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+E0618 07:27:00.287518 1076117 controllerserver.go:66] invalid create volume req:
+=== RUN TestCreateVolume/_volume_name_missing
+I0618 07:27:00.291890 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.291907 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.291913 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.291918 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.291922 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.291927 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.291931 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.291935 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.291941 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/volume_capabilities_missing
+I0618 07:27:00.295398 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.295414 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.295420 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.295428 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.295433 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.295438 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.295443 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.295447 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.295453 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/require_volume_size_exceed
+I0618 07:27:00.298976 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.298989 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.298994 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.298999 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.299004 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.299011 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.299017 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.299021 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.299026 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/logical_sector_size_parse_error
+I0618 07:27:00.302218 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.302232 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.302237 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.302242 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.302247 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.302252 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.302256 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.302261 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.302267 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/maxshare_parse_error_
+I0618 07:27:00.305438 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.305451 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.305456 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.305462 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.305467 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.305472 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.305477 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.305484 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.305490 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/maxshare_invalid_value_
+I0618 07:27:00.308869 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.308887 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.308893 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.308898 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.308907 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.308912 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.308916 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.308920 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.308926 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/invalid_perf_profile
+I0618 07:27:00.312973 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.313002 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.313008 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.313013 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.313018 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.313023 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.313028 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.313032 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.313038 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/Volume_capability_not_supported_
+I0618 07:27:00.316521 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.316533 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.316538 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.316543 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.316548 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.316553 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.316557 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.316565 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.316570 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/normalize_storageaccounttype_error_
+I0618 07:27:00.321365 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.321376 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.321379 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.321383 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.321385 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.321388 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.321391 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.321393 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.321397 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:27:00.321425 1076117 controllerserver.go:108] increasing requested disk size from 0GiB to 1GiB (minimal disk size)
+=== RUN TestCreateVolume/normalize_cache_mode_error_
+I0618 07:27:00.328036 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.328047 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.328057 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.328060 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.328063 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.328066 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.328069 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.328072 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.328075 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:27:00.328104 1076117 controllerserver.go:108] increasing requested disk size from 0GiB to 1GiB (minimal disk size)
+=== RUN TestCreateVolume/custom_tags_error_
+I0618 07:27:00.331944 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.331955 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.331958 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.331961 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.331964 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.331967 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.331970 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.331972 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.331979 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/create_managed_disk_error_
+I0618 07:27:00.334937 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.334947 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.334951 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.334953 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.334956 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.334959 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.334962 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.334964 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.334968 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:27:00.335013 1076117 controllerserver.go:108] increasing requested disk size from 0GiB to 1GiB (minimal disk size)
+=== RUN TestCreateVolume/create_managed_disk_not_found_error_
+I0618 07:27:00.338132 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.338145 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.338150 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.338155 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.338159 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.338164 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.338168 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.338174 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.338180 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:27:00.338226 1076117 controllerserver.go:108] increasing requested disk size from 0GiB to 1GiB (minimal disk size)
+=== RUN TestCreateVolume/valid_request_ZRS
+I0618 07:27:00.341683 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.341696 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.341701 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.341706 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.341715 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.341721 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.341727 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.341732 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.341737 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/valid_request
+I0618 07:27:00.347793 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.347807 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.347812 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.347817 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.347822 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.347827 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.347832 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.347838 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.347844 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/invalid_parameter
+I0618 07:27:00.350863 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.350877 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.350883 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.350887 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.350892 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.350897 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.350902 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.350908 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.350915 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/[Failure]_advanced_perfProfile_fails_if_no_device_settings_provided
+I0618 07:27:00.364069 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.364085 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.364090 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.364096 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.364109 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.364117 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.364125 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.364133 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.364142 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateVolume/valid_PerformancePlus_request,_disk_resizes_to_min_required_size
+I0618 07:27:00.368843 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.368858 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.368867 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.368872 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.368877 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.368881 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.368886 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.368890 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.368896 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+W0618 07:27:00.368961 1076117 controllerserver.go:104] using PerformancePlus, increasing requested disk size from 10GiB to 513GiB (minimal size for PerformancePlus feature)
+--- PASS: TestCreateVolume (0.10s)
+ --- PASS: TestCreateVolume/_invalid_ (0.02s)
+ --- PASS: TestCreateVolume/_volume_name_missing (0.00s)
+ --- PASS: TestCreateVolume/volume_capabilities_missing (0.00s)
+ --- PASS: TestCreateVolume/require_volume_size_exceed (0.00s)
+ --- PASS: TestCreateVolume/logical_sector_size_parse_error (0.00s)
+ --- PASS: TestCreateVolume/maxshare_parse_error_ (0.00s)
+ --- PASS: TestCreateVolume/maxshare_invalid_value_ (0.00s)
+ --- PASS: TestCreateVolume/invalid_perf_profile (0.00s)
+ --- PASS: TestCreateVolume/Volume_capability_not_supported_ (0.00s)
+ --- PASS: TestCreateVolume/normalize_storageaccounttype_error_ (0.00s)
+ --- PASS: TestCreateVolume/normalize_cache_mode_error_ (0.01s)
+ --- PASS: TestCreateVolume/custom_tags_error_ (0.00s)
+ --- PASS: TestCreateVolume/create_managed_disk_error_ (0.00s)
+ --- PASS: TestCreateVolume/create_managed_disk_not_found_error_ (0.00s)
+ --- PASS: TestCreateVolume/valid_request_ZRS (0.00s)
+ --- PASS: TestCreateVolume/valid_request (0.01s)
+ --- PASS: TestCreateVolume/invalid_parameter (0.00s)
+ --- PASS: TestCreateVolume/[Failure]_advanced_perfProfile_fails_if_no_device_settings_provided (0.01s)
+ --- PASS: TestCreateVolume/valid_PerformancePlus_request,_disk_resizes_to_min_required_size (0.00s)
+=== RUN TestDeleteVolume
+I0618 07:27:00.372712 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.372794 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.372800 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.372805 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.372809 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.372813 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.372818 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.372822 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.372827 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+E0618 07:27:00.372991 1076117 controllerserver.go:329] validateDiskURI(123) in DeleteVolume failed with error: invalid DiskURI: 123, correct format: [/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}]
+--- PASS: TestDeleteVolume (0.00s)
+=== RUN TestControllerGetVolume
+I0618 07:27:00.376737 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.376749 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.376756 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.376761 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.376766 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.376770 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.376775 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.376779 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.376785 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestControllerGetVolume (0.00s)
+=== RUN TestGetSnapshotInfo
+I0618 07:27:00.380560 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.380571 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.380577 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.380585 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.380591 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.380596 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.380600 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.380605 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.380611 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetSnapshotInfo (0.00s)
+=== RUN TestControllerPublishVolume
+I0618 07:27:00.393343 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.393360 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.393364 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.393367 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.393371 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.393374 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.393377 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.393380 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.393384 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerPublishVolume/Volume_ID_missing
+I0618 07:27:00.396489 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.396506 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.396510 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.396513 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.396516 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.396519 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.396522 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.396525 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.396529 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerPublishVolume/Volume_capability_missing
+I0618 07:27:00.399973 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.399990 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.399994 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.399998 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.400000 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.400004 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.400007 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.400014 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.400019 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerPublishVolume/Volume_capability_not_supported
+I0618 07:27:00.405746 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.405763 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.405766 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.405770 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.405772 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.405776 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.405779 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.405782 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.405786 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerPublishVolume/diskName_error
+=== RUN TestControllerPublishVolume/NodeID_missing
+=== RUN TestControllerPublishVolume/failed_provisioning_state
+W0618 07:27:00.406313 1076117 controllerserver.go:419] VM(unit-test-node) is in failed state, update VM first
+=== RUN TestControllerPublishVolume/Volume_already_attached_success
+I0618 07:27:00.409624 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.409638 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.409643 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.409647 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.409652 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.409656 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.409661 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.409665 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.409670 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerPublishVolume/CachingMode_Error
+I0618 07:27:00.413055 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.413069 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.413073 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.413076 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.413079 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.413082 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.413085 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.413089 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.413096 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestControllerPublishVolume (0.03s)
+ --- PASS: TestControllerPublishVolume/Volume_ID_missing (0.00s)
+ --- PASS: TestControllerPublishVolume/Volume_capability_missing (0.00s)
+ --- PASS: TestControllerPublishVolume/Volume_capability_not_supported (0.01s)
+ --- PASS: TestControllerPublishVolume/diskName_error (0.00s)
+ --- PASS: TestControllerPublishVolume/NodeID_missing (0.00s)
+ --- PASS: TestControllerPublishVolume/failed_provisioning_state (0.00s)
+ --- PASS: TestControllerPublishVolume/Volume_already_attached_success (0.00s)
+ --- PASS: TestControllerPublishVolume/CachingMode_Error (0.00s)
+=== RUN TestControllerUnpublishVolume
+I0618 07:27:00.416501 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.416516 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.416519 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.416523 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.416525 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.416528 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.416531 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.416535 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.416543 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestControllerUnpublishVolume (0.00s)
+=== RUN TestControllerGetCapabilities
+I0618 07:27:00.419834 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.419850 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.419854 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.419860 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.419863 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.419866 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.419870 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.419875 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.419880 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestControllerGetCapabilities (0.00s)
+=== RUN TestControllerExpandVolume
+=== RUN TestControllerExpandVolume/Volume_ID_missing
+I0618 07:27:00.423064 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.423077 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.423081 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.423084 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.423087 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.423094 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.423098 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.423103 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.423108 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerExpandVolume/Volume_capabilities_missing
+I0618 07:27:00.426258 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.426273 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.426277 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.426280 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.426283 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.426286 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.426290 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.426296 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.426301 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerExpandVolume/Volume_Capacity_range_missing
+I0618 07:27:00.433759 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.433776 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.433779 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.433783 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.433785 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.433792 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.433795 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.433799 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.433805 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerExpandVolume/disk_type_is_not_managedDisk
+I0618 07:27:00.437167 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.437183 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.437186 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.437190 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.437192 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.437196 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.437198 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.437202 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.437208 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerExpandVolume/Disk_URI_not_valid
+I0618 07:27:00.446073 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.446096 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.446102 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.446107 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.446111 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.446116 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.446121 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.446126 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.446132 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestControllerExpandVolume/DiskSize_missing
+I0618 07:27:00.461515 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.461537 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.461543 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.461548 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.461553 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.461559 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.461564 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.461569 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.461577 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestControllerExpandVolume (0.04s)
+ --- PASS: TestControllerExpandVolume/Volume_ID_missing (0.00s)
+ --- PASS: TestControllerExpandVolume/Volume_capabilities_missing (0.00s)
+ --- PASS: TestControllerExpandVolume/Volume_Capacity_range_missing (0.01s)
+ --- PASS: TestControllerExpandVolume/disk_type_is_not_managedDisk (0.00s)
+ --- PASS: TestControllerExpandVolume/Disk_URI_not_valid (0.01s)
+ --- PASS: TestControllerExpandVolume/DiskSize_missing (0.02s)
+=== RUN TestCreateSnapshot
+=== RUN TestCreateSnapshot/Source_volume_ID_missing
+I0618 07:27:00.479977 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.480009 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.480015 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.480020 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.480025 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.480030 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.480035 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.480040 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.480047 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateSnapshot/Snapshot_name_missing
+I0618 07:27:00.495220 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.495240 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.495246 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.495251 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.495256 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.495261 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.495266 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.495271 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.495277 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateSnapshot/Invalid_parameter_option
+I0618 07:27:00.510136 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.510156 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.510167 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.510174 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.510179 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.510184 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.510188 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.510193 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.510207 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateSnapshot/Invalid_volume_ID
+I0618 07:27:00.528385 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.528416 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.528423 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.528428 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.528433 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.528438 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.528442 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.528446 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.528453 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateSnapshot/Invalid_tag_
+I0618 07:27:00.544393 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.544414 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.544420 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.544425 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.544432 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.544437 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.544442 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.544447 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.544453 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateSnapshot/create_snapshot_error_
+I0618 07:27:00.557613 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.557632 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.557638 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.557643 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.557648 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.557653 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.557658 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.557663 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.557669 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateSnapshot/create_snapshot_already_exist_
+I0618 07:27:00.584262 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.584284 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.584290 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.584300 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.584305 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.584310 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.584315 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.584319 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.584325 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateSnapshot/Get_Snapshot_ID_error_
+I0618 07:27:00.588022 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.588037 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.588042 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.588051 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.588056 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.588060 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.588064 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.588068 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.588072 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestCreateSnapshot/valid_request_
+I0618 07:27:00.610350 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.610371 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.610376 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.610381 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.610386 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.610391 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.610396 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.610400 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.610406 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+W0618 07:27:00.610592 1076117 azure_disk_utils.go:342] the requested volume name ("testurl/subscriptions/23/providers/Microsoft.Compute/snapshots/snapshot-name") is invalid, so it is regenerated as ("pvc-disk-dynamic-26f0a0b5-2d44-11ef-b63d-000d3ac772b5")
+--- PASS: TestCreateSnapshot (0.15s)
+ --- PASS: TestCreateSnapshot/Source_volume_ID_missing (0.02s)
+ --- PASS: TestCreateSnapshot/Snapshot_name_missing (0.02s)
+ --- PASS: TestCreateSnapshot/Invalid_parameter_option (0.01s)
+ --- PASS: TestCreateSnapshot/Invalid_volume_ID (0.02s)
+ --- PASS: TestCreateSnapshot/Invalid_tag_ (0.02s)
+ --- PASS: TestCreateSnapshot/create_snapshot_error_ (0.01s)
+ --- PASS: TestCreateSnapshot/create_snapshot_already_exist_ (0.03s)
+ --- PASS: TestCreateSnapshot/Get_Snapshot_ID_error_ (0.00s)
+ --- PASS: TestCreateSnapshot/valid_request_ (0.02s)
+=== RUN TestDeleteSnapshot
+=== RUN TestDeleteSnapshot/Snapshot_ID_missing
+I0618 07:27:00.631446 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.631468 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.631473 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.631478 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.631486 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.631490 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.631495 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.631499 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.631507 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestDeleteSnapshot/Snapshot_ID_invalid
+I0618 07:27:00.650584 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.650605 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.650616 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.650621 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.650626 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.650631 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.650636 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.650641 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.650652 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestDeleteSnapshot/delete_Snapshot_error
+I0618 07:27:00.674738 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.674760 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.674770 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.674776 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.674781 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.674786 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.674790 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.674795 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.674801 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestDeleteSnapshot/Valid_delete_Snapshot_
+I0618 07:27:00.698876 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.698902 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.698908 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.698913 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.698918 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.698924 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.698930 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.698938 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.698944 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestDeleteSnapshot (0.09s)
+ --- PASS: TestDeleteSnapshot/Snapshot_ID_missing (0.02s)
+ --- PASS: TestDeleteSnapshot/Snapshot_ID_invalid (0.02s)
+ --- PASS: TestDeleteSnapshot/delete_Snapshot_error (0.02s)
+ --- PASS: TestDeleteSnapshot/Valid_delete_Snapshot_ (0.02s)
+=== RUN TestGetSnapshotByID
+=== RUN TestGetSnapshotByID/snapshotID_not_valid
+I0618 07:27:00.710533 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.710553 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.710558 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.710563 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.710568 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.710573 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.710577 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.710582 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.710587 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestGetSnapshotByID/snapshot_get_error
+I0618 07:27:00.716226 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.716251 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.716258 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.716268 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.716272 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.716277 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.716282 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.716286 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.716293 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetSnapshotByID (0.02s)
+ --- PASS: TestGetSnapshotByID/snapshotID_not_valid (0.01s)
+ --- PASS: TestGetSnapshotByID/snapshot_get_error (0.01s)
+=== RUN TestListSnapshots
+=== RUN TestListSnapshots/snapshotID_not_valid
+I0618 07:27:00.726205 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.726220 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.726229 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.726233 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.726236 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.726239 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.726242 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.726245 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.726250 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListSnapshots/valid_List
+I0618 07:27:00.730798 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.730819 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.730826 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.730831 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.730839 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.730845 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.730849 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.730855 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.730861 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListSnapshots/List_resource_error
+I0618 07:27:00.736074 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.736090 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.736096 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.736101 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.736107 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.736112 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.736117 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.736122 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.736128 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListSnapshots/snapshot_property_nil
+I0618 07:27:00.739962 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.739981 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.739987 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.739992 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.739997 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.740005 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.740010 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.740014 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.740020 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListSnapshots/List_snapshots_when_source_volumeId_is_given
+I0618 07:27:00.744895 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.744911 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.744917 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.744922 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.744927 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.744931 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.744936 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.744941 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.744946 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestListSnapshots (0.03s)
+ --- PASS: TestListSnapshots/snapshotID_not_valid (0.01s)
+ --- PASS: TestListSnapshots/valid_List (0.00s)
+ --- PASS: TestListSnapshots/List_resource_error (0.01s)
+ --- PASS: TestListSnapshots/snapshot_property_nil (0.00s)
+ --- PASS: TestListSnapshots/List_snapshots_when_source_volumeId_is_given (0.00s)
+=== RUN TestGetCapacity
+I0618 07:27:00.749827 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.749846 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.749852 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.749857 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.749861 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.749867 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.749871 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.749875 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.749881 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetCapacity (0.00s)
+=== RUN TestListVolumes
+=== RUN TestListVolumes/When_no_KubeClient_exists,_Valid_list_without_max_entries_or_starting_token
+I0618 07:27:00.757044 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.757062 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.757067 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.757072 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.757077 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.757082 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.757086 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.757090 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.757096 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_no_KubeClient_exists,_Valid_list_with_max_entries
+I0618 07:27:00.766366 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.766386 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.766392 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.766397 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.766402 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.766408 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.766413 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.766418 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.766424 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_no_KubeClient_exists,_Valid_list_with_max_entries_and_starting_token
+I0618 07:27:00.771026 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.771043 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.771048 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.771053 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.771058 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.771064 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.771072 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.771077 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.771083 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_no_KubeClient_exists,_ListVolumes_request_with_starting_token_but_no_entries_in_response
+I0618 07:27:00.775369 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.775384 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.775390 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.775395 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.775400 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.775405 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.775410 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.775415 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.775421 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_no_KubeClient_exists,_ListVolumes_list_resource_error
+I0618 07:27:00.779594 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.779608 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.779613 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.779618 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.779623 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.779628 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.779633 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.779638 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.779643 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_KubeClient_exists,_Empty_list_without_start_token_should_not_return_error
+I0618 07:27:00.783140 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.783152 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.783155 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.783158 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.783162 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.783164 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.783167 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.783170 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.783174 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_KubeClient_exists,_Valid_list_without_max_entries_or_starting_token
+I0618 07:27:00.788937 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.788956 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.788963 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.788972 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.788979 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.788986 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.788992 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.788998 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.789005 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_KubeClient_exists,_Valid_list_with_max_entries
+I0618 07:27:00.794310 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.794326 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.794331 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.794336 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.794341 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.794346 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.794350 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.794355 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.794361 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_KubeClient_exists,_Valid_list_with_max_entries_and_starting_token
+I0618 07:27:00.797627 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.797637 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.797641 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.797644 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.797647 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.797650 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.797653 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.797656 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.797659 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_KubeClient_exists,_ListVolumes_request_with_starting_token_but_no_entries_in_response
+I0618 07:27:00.800823 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.800837 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.800842 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.800847 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.800853 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.800859 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.800864 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.800868 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.800873 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestListVolumes/When_KubeClient_exists,_ListVolumes_list_pv_error
+I0618 07:27:00.803907 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.803918 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.803925 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.803928 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.803931 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.803934 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.803937 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.803940 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.803943 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestListVolumes (0.05s)
+ --- PASS: TestListVolumes/When_no_KubeClient_exists,_Valid_list_without_max_entries_or_starting_token (0.01s)
+ --- PASS: TestListVolumes/When_no_KubeClient_exists,_Valid_list_with_max_entries (0.01s)
+ --- PASS: TestListVolumes/When_no_KubeClient_exists,_Valid_list_with_max_entries_and_starting_token (0.00s)
+ --- PASS: TestListVolumes/When_no_KubeClient_exists,_ListVolumes_request_with_starting_token_but_no_entries_in_response (0.00s)
+ --- PASS: TestListVolumes/When_no_KubeClient_exists,_ListVolumes_list_resource_error (0.00s)
+ --- PASS: TestListVolumes/When_KubeClient_exists,_Empty_list_without_start_token_should_not_return_error (0.00s)
+ --- PASS: TestListVolumes/When_KubeClient_exists,_Valid_list_without_max_entries_or_starting_token (0.01s)
+ --- PASS: TestListVolumes/When_KubeClient_exists,_Valid_list_with_max_entries (0.01s)
+ --- PASS: TestListVolumes/When_KubeClient_exists,_Valid_list_with_max_entries_and_starting_token (0.00s)
+ --- PASS: TestListVolumes/When_KubeClient_exists,_ListVolumes_request_with_starting_token_but_no_entries_in_response (0.00s)
+ --- PASS: TestListVolumes/When_KubeClient_exists,_ListVolumes_list_pv_error (0.00s)
+=== RUN TestValidateVolumeCapabilities
+=== RUN TestValidateVolumeCapabilities/Volume_ID_missing_
+I0618 07:27:00.810036 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.810048 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.810051 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.810054 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.810058 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.810061 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.810064 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.810066 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.810070 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestValidateVolumeCapabilities/Volume_capabilities_missing_
+I0618 07:27:00.812700 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.812714 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.812739 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.812750 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.812754 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.812759 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.812763 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.812768 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.812773 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestValidateVolumeCapabilities/check_disk_err_
+I0618 07:27:00.816180 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.816194 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.816199 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.816203 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.816208 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.816212 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.816218 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.816223 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.816230 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestValidateVolumeCapabilities/invalid_req_
+I0618 07:27:00.819252 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.819267 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.819272 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.819277 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.819282 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.819286 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.819290 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.819295 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.819300 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestValidateVolumeCapabilities/valid_req_
+I0618 07:27:00.827987 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.827999 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.828003 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.828006 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.828009 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.828015 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.828018 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.828020 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.828024 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestValidateVolumeCapabilities (0.02s)
+ --- PASS: TestValidateVolumeCapabilities/Volume_ID_missing_ (0.01s)
+ --- PASS: TestValidateVolumeCapabilities/Volume_capabilities_missing_ (0.00s)
+ --- PASS: TestValidateVolumeCapabilities/check_disk_err_ (0.00s)
+ --- PASS: TestValidateVolumeCapabilities/invalid_req_ (0.00s)
+ --- PASS: TestValidateVolumeCapabilities/valid_req_ (0.01s)
+=== RUN TestGetSourceDiskSize
+=== RUN TestGetSourceDiskSize/max_depth_reached
+I0618 07:27:00.831737 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.831753 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.831758 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.831763 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.831767 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.831771 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.831776 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.831781 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.831789 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestGetSourceDiskSize/diskproperty_not_found
+I0618 07:27:00.834998 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.835013 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.835018 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.835022 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.835026 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.835031 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.835036 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.835042 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.835049 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestGetSourceDiskSize/nil_DiskSizeGB
+I0618 07:27:00.840410 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.840424 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.840429 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.840434 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.840441 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.840446 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.840450 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.840454 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.840460 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestGetSourceDiskSize/successful_search:_depth_1
+I0618 07:27:00.844773 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.844794 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.844799 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.844804 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.844808 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.844814 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.844818 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.844823 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.844828 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestGetSourceDiskSize/successful_search:_depth_2
+I0618 07:27:00.848416 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.848434 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.848443 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.848449 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.848453 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.848458 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.848463 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.848468 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.848473 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetSourceDiskSize (0.02s)
+ --- PASS: TestGetSourceDiskSize/max_depth_reached (0.00s)
+ --- PASS: TestGetSourceDiskSize/diskproperty_not_found (0.00s)
+ --- PASS: TestGetSourceDiskSize/nil_DiskSizeGB (0.01s)
+ --- PASS: TestGetSourceDiskSize/successful_search:_depth_1 (0.00s)
+ --- PASS: TestGetSourceDiskSize/successful_search:_depth_2 (0.00s)
+=== RUN TestIsAsyncAttachEnabled
+--- PASS: TestIsAsyncAttachEnabled (0.00s)
+=== RUN TestNewFakeDriver
+I0618 07:27:00.852385 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.852404 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.852411 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.852418 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.852423 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.852430 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.852435 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.852442 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.852448 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestNewFakeDriver (0.00s)
+=== RUN TestGetPluginInfo
+I0618 07:27:00.856544 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.856563 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.856568 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.856573 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.856578 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.856582 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.856587 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.856592 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.856597 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:27:00.862315 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.862332 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.862337 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.862342 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.862346 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.862351 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.862356 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.862360 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.862364 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+I0618 07:27:00.869563 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.869579 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.869585 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.869590 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.869595 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.869600 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.869605 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.869610 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.869616 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetPluginInfo (0.02s)
+=== RUN TestProbe
+I0618 07:27:00.873397 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.873412 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.873418 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.873423 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.873429 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.873434 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.873439 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.873443 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.873449 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestProbe (0.00s)
+=== RUN TestGetPluginCapabilities
+I0618 07:27:00.877131 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.877146 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.877151 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.877159 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.877164 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.877170 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.877174 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.877179 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.877185 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetPluginCapabilities (0.00s)
+=== RUN TestNodeGetCapabilities
+I0618 07:27:00.880827 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.880841 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.880847 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.880852 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.880857 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.880861 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.880866 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.880870 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.880876 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestNodeGetCapabilities (0.00s)
+=== RUN TestGetMaxDataDiskCount
+--- PASS: TestGetMaxDataDiskCount (0.00s)
+=== RUN TestEnsureMountPoint
+I0618 07:27:00.885059 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.885076 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.885082 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.885087 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.885091 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.885096 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.885101 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.885106 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.885112 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+E0618 07:27:00.885191 1076117 nodeserver.go:594] mkdir failed on target: /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/azuredisk.go (mkdir /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/azuredisk.go: not a directory)
+--- PASS: TestEnsureMountPoint (0.00s)
+=== RUN TestNodeGetInfo
+=== RUN TestNodeGetInfo/[Success]_Get_node_information_for_existing_VM
+I0618 07:27:00.889977 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.889996 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.890003 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.890012 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.890018 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.890023 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.890029 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.890035 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.890042 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+=== RUN TestNodeGetInfo/[Failure]_Get_node_information_for_non-existing_VM
+I0618 07:27:00.893446 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.893460 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.893465 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.893470 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.893474 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.893479 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.893483 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.893487 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.893570 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+W0618 07:27:00.893643 1076117 azure_wrap.go:79] Unable to find node devbox: instance not found
+W0618 07:27:00.893659 1076117 nodeserver.go:339] get zone(fakeNodeID) failed with: instance not found, fall back to get zone from node labels
+--- PASS: TestNodeGetInfo (0.01s)
+ --- PASS: TestNodeGetInfo/[Success]_Get_node_information_for_existing_VM (0.00s)
+ --- PASS: TestNodeGetInfo/[Failure]_Get_node_information_for_non-existing_VM (0.00s)
+=== RUN TestNodeGetVolumeStats
+I0618 07:27:00.896858 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.896873 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.896878 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.896883 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.896888 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.896892 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.896896 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.896901 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.896910 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+E0618 07:27:00.896956 1076117 nodeserver.go:449] NodeGetVolumeStats: failed to get volume stats for volume vol_1 path /not/a/real/directory: rpc error: code = NotFound desc = path /not/a/real/directory does not exist
+E0618 07:27:00.897007 1076117 nodeserver.go:449] NodeGetVolumeStats: failed to get volume stats for volume vol_1 path /tmp/fake-volume-path: rpc error: code = NotFound desc = failed to determine whether /tmp/fake-volume-path is block device: host util is not device path
+--- PASS: TestNodeGetVolumeStats (0.00s)
+=== RUN TestNodeStageVolume
+I0618 07:27:00.899909 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.899923 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.899928 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.899932 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.899937 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.899942 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.899946 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.899951 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.899956 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+W0618 07:27:00.900272 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/azure/scsi1/ failed, error: read /dev/disk/azure/scsi1/ error: bad dir
+W0618 07:27:00.900297 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/by-id/ failed, error: read /dev/disk/by-id/ error: bad dir
+E0618 07:27:00.900358 1076117 nodeserver.go:169] NodeStageVolume: could not determine if volume vol_1 needs to be resized: Failed readonly device check. Expected 1 or 0, got '16106127360'
+W0618 07:27:00.900385 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/azure/scsi1/ failed, error: read /dev/disk/azure/scsi1/ error: bad dir
+W0618 07:27:00.900399 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/by-id/ failed, error: read /dev/disk/by-id/ error: bad dir
+W0618 07:27:00.900443 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/azure/scsi1/ failed, error: read /dev/disk/azure/scsi1/ error: bad dir
+W0618 07:27:00.900459 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/by-id/ failed, error: read /dev/disk/by-id/ error: bad dir
+W0618 07:27:00.900517 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/azure/scsi1/ failed, error: read /dev/disk/azure/scsi1/ error: bad dir
+W0618 07:27:00.900531 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/by-id/ failed, error: read /dev/disk/by-id/ error: bad dir
+I0618 07:27:00.900574 1076117 mount_linux.go:515] Disk "/dev/sdd" appears to be unformatted, attempting to format as type: "ext4" with options: [-F -m0 /dev/sdd]
+I0618 07:27:00.900590 1076117 mount_linux.go:526] Disk successfully formatted (mkfs): ext4 - /dev/sdd /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/source_test
+E0618 07:27:00.900610 1076117 nodeserver.go:169] NodeStageVolume: could not determine if volume vol_1 needs to be resized: Failed readonly device check. Expected 1 or 0, got ''
+W0618 07:27:00.900656 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/azure/scsi1/ failed, error: read /dev/disk/azure/scsi1/ error: bad dir
+W0618 07:27:00.900671 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/by-id/ failed, error: read /dev/disk/by-id/ error: bad dir
+W0618 07:27:00.900714 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/azure/scsi1/ failed, error: read /dev/disk/azure/scsi1/ error: bad dir
+W0618 07:27:00.900798 1076117 azure_common_linux.go:188] azureDisk - getDiskLinkByDevName by sdd under /dev/disk/by-id/ failed, error: read /dev/disk/by-id/ error: bad dir
+E0618 07:27:00.900847 1076117 nodeserver.go:169] NodeStageVolume: could not determine if volume vol_1 needs to be resized: Failed readonly device check. Expected 1 or 0, got '16106127360'
+--- PASS: TestNodeStageVolume (0.00s)
+=== RUN TestNodeUnstageVolume
+I0618 07:27:00.904301 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.904313 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.904319 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.904324 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.904328 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.904333 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.904338 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.904342 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.904347 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+W0618 07:27:00.904478 1076117 mount_helper_common.go:34] Warning: mount cleanup skipped because path does not exist: /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/abc.go
+--- PASS: TestNodeUnstageVolume (0.00s)
+=== RUN TestNodePublishVolume
+I0618 07:27:00.907775 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.907793 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.907798 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.907803 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.907808 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.907813 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.907818 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.907824 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.907833 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+E0618 07:27:00.908058 1076117 nodeserver.go:594] mkdir failed on target: /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/azuredisk.go (mkdir /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/azuredisk.go: not a directory)
+--- PASS: TestNodePublishVolume (0.00s)
+=== RUN TestNodeUnpublishVolume
+I0618 07:27:00.911611 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.911630 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.911635 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.911641 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.911646 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.911650 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.911655 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.911670 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.911676 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+W0618 07:27:00.911821 1076117 mount_helper_common.go:34] Warning: mount cleanup skipped because path does not exist: /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/abc.go
+--- PASS: TestNodeUnpublishVolume (0.00s)
+=== RUN TestNodeExpandVolume
+I0618 07:27:00.914959 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.914979 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.914985 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.914990 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.914995 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.915000 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.915004 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.915009 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.915015 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+E0618 07:27:00.915204 1076117 nodeserver.go:513] rpc error: code = Internal desc = could not resize volume "test" ("test"): resize of device test failed: exit status 1. resize2fs output: , will continue checking whether the volume has been resized
+--- PASS: TestNodeExpandVolume (0.00s)
+=== RUN TestGetBlockSizeBytes
+I0618 07:27:00.918473 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.918489 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.918494 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.918500 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.918506 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.918512 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.918517 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.918522 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.918527 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetBlockSizeBytes (0.01s)
+=== RUN TestEnsureBlockTargetFile
+I0618 07:27:00.926032 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.926045 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.926050 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.926055 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.926060 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.926065 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.926069 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.926074 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.926079 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+E0618 07:27:00.926509 1076117 nodeserver.go:594] mkdir failed on target: /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/test (mkdir /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azuredisk/test: not a directory)
+--- PASS: TestEnsureBlockTargetFile (0.00s)
+=== RUN TestMakeDir
+--- PASS: TestMakeDir (0.00s)
+=== RUN TestGetDevicePathWithLUN
+I0618 07:27:00.954402 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.954427 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.954433 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.954439 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.954444 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.954449 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.954454 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.954459 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.954466 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetDevicePathWithLUN (0.03s)
+=== RUN TestGetDevicePathWithMountPath
+I0618 07:27:00.958264 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 07:27:00.958281 1076117 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 07:27:00.958287 1076117 driver.go:81] Enabling controller service capability: CREATE_DELETE_SNAPSHOT
+I0618 07:27:00.958292 1076117 driver.go:81] Enabling controller service capability: LIST_SNAPSHOTS
+I0618 07:27:00.958296 1076117 driver.go:81] Enabling controller service capability: CLONE_VOLUME
+I0618 07:27:00.958300 1076117 driver.go:81] Enabling controller service capability: EXPAND_VOLUME
+I0618 07:27:00.958304 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+I0618 07:27:00.958309 1076117 driver.go:81] Enabling controller service capability: LIST_VOLUMES_PUBLISHED_NODES
+I0618 07:27:00.958315 1076117 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetDevicePathWithMountPath (0.02s)
+=== RUN TestNodePublishVolumeIdempotentMount
+--- PASS: TestNodePublishVolumeIdempotentMount (0.00s)
+=== RUN TestGetVersion
+--- PASS: TestGetVersion (0.00s)
+=== RUN TestGetVersionYAML
+--- PASS: TestGetVersionYAML (0.00s)
+=== RUN TestGetUserAgent
+--- PASS: TestGetUserAgent (0.00s)
+PASS
+coverage: 76.8% of statements
+ok sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk (cached) coverage: 76.8% of statements
+? sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk/mockpersistentvolume [no test files]
+? sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk/mockkubeclient [no test files]
+? sigs.k8s.io/azuredisk-csi-driver/pkg/azurediskplugin [no test files]
+? sigs.k8s.io/azuredisk-csi-driver/pkg/optimization/mockoptimization [no test files]
+? sigs.k8s.io/azuredisk-csi-driver/pkg/os/disk [no test files]
+=== RUN TestCheckDiskName
+--- PASS: TestCheckDiskName (0.00s)
+=== RUN TestGetCachingMode
+--- PASS: TestGetCachingMode (0.00s)
+=== RUN TestGetKubeConfig
+--- PASS: TestGetKubeConfig (0.00s)
+=== RUN TestGetCloudProvider
+--- FAIL: TestGetCloudProvider (0.00s)
+panic: runtime error: invalid memory address or nil pointer dereference [recovered]
+ panic: runtime error: invalid memory address or nil pointer dereference
+[signal SIGSEGV: segmentation violation code=0x1 addr=0x18 pc=0x18ec1c4]
+
+goroutine 40 [running]:
+testing.tRunner.func1.2({0x1b22c20, 0x2f7a170})
+ /usr/local/go/src/testing/testing.go:1545 +0x238
+testing.tRunner.func1()
+ /usr/local/go/src/testing/testing.go:1548 +0x397
+panic({0x1b22c20?, 0x2f7a170?})
+ /usr/local/go/src/runtime/panic.go:914 +0x21f
+sigs.k8s.io/azuredisk-csi-driver/pkg/azureutils.TestGetCloudProvider(0xc000105040)
+ /home/xinyuyuan/go/src/azuredisk-csi-driver/pkg/azureutils/azure_disk_utils_test.go:397 +0x7a4
+testing.tRunner(0xc000105040, 0x1f0f700)
+ /usr/local/go/src/testing/testing.go:1595 +0xff
+created by testing.(*T).Run in goroutine 1
+ /usr/local/go/src/testing/testing.go:1648 +0x3ad
+FAIL sigs.k8s.io/azuredisk-csi-driver/pkg/azureutils 0.034s
+=== RUN TestNewCSIDriver
+E0618 06:55:40.327626 1051148 driver.go:41] Driver name missing
+E0618 06:55:40.327686 1051148 driver.go:46] NodeID missing
+E0618 06:55:40.327691 1051148 driver.go:51] Version argument missing, now skip it
+--- PASS: TestNewCSIDriver (0.00s)
+=== RUN TestGetVolumeCapabilityAccessModes
+I0618 06:55:40.327753 1051148 driver.go:100] Enabling volume access mode: SINGLE_NODE_WRITER
+--- PASS: TestGetVolumeCapabilityAccessModes (0.00s)
+=== RUN TestValidateControllerServiceRequest
+I0618 06:55:40.327801 1051148 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 06:55:40.327810 1051148 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 06:55:40.327818 1051148 driver.go:81] Enabling controller service capability: GET_CAPACITY
+I0618 06:55:40.327825 1051148 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+--- PASS: TestValidateControllerServiceRequest (0.00s)
+=== RUN TestAddControllerServiceCapabilities
+I0618 06:55:40.327851 1051148 driver.go:81] Enabling controller service capability: CREATE_DELETE_VOLUME
+I0618 06:55:40.327860 1051148 driver.go:81] Enabling controller service capability: PUBLISH_UNPUBLISH_VOLUME
+I0618 06:55:40.327865 1051148 driver.go:81] Enabling controller service capability: GET_CAPACITY
+I0618 06:55:40.327869 1051148 driver.go:81] Enabling controller service capability: LIST_VOLUMES
+--- PASS: TestAddControllerServiceCapabilities (0.00s)
+=== RUN TestAddCNodeServiceCapabilities
+--- PASS: TestAddCNodeServiceCapabilities (0.00s)
+=== RUN TestNewFakeCSIDriver
+--- PASS: TestNewFakeCSIDriver (0.00s)
+=== RUN TestNewNonBlockingGRPCServer
+--- PASS: TestNewNonBlockingGRPCServer (0.00s)
+=== RUN TestStart
+I0618 06:55:40.829120 1051148 server.go:117] Listening for connections on address: &net.TCPAddr{IP:net.IP{0x7f, 0x0, 0x0, 0x1}, Port:34091, Zone:""}
+--- PASS: TestStart (1.00s)
+=== RUN TestServe
+I0618 06:55:41.330253 1051148 server.go:117] Listening for connections on address: &net.TCPAddr{IP:net.IP{0x7f, 0x0, 0x0, 0x1}, Port:38905, Zone:""}
+--- PASS: TestServe (1.00s)
+=== RUN TestWait
+--- PASS: TestWait (0.00s)
+=== RUN TestStop
+--- PASS: TestStop (0.00s)
+=== RUN TestForceStop
+--- PASS: TestForceStop (0.00s)
+=== RUN TestParseEndpoint
+--- PASS: TestParseEndpoint (0.00s)
+=== RUN TestLogGRPC
+=== RUN TestLogGRPC/with_secrets
+=== RUN TestLogGRPC/without_secrets
+--- PASS: TestLogGRPC (0.00s)
+ --- PASS: TestLogGRPC/with_secrets (0.00s)
+ --- PASS: TestLogGRPC/without_secrets (0.00s)
+=== RUN TestNewControllerServiceCapability
+--- PASS: TestNewControllerServiceCapability (0.00s)
+=== RUN TestNewNodeServiceCapability
+--- PASS: TestNewNodeServiceCapability (0.00s)
+=== RUN TestGetLogLevel
+--- PASS: TestGetLogLevel (0.00s)
+PASS
+coverage: 87.6% of statements
+ok sigs.k8s.io/azuredisk-csi-driver/pkg/csi-common (cached) coverage: 87.6% of statements
+=== RUN TestNewFakeSafeMounter
+--- PASS: TestNewFakeSafeMounter (0.00s)
+=== RUN TestMount
+--- PASS: TestMount (0.00s)
+=== RUN TestMountSensitive
+--- PASS: TestMountSensitive (0.00s)
+=== RUN TestIsLikelyNotMountPoint
+--- PASS: TestIsLikelyNotMountPoint (0.00s)
+=== RUN TestSetNextCommandOutputScripts
+--- PASS: TestSetNextCommandOutputScripts (0.00s)
+=== RUN TestNewSafeMounter
+--- PASS: TestNewSafeMounter (0.01s)
+PASS
+coverage: 85.3% of statements
+ok sigs.k8s.io/azuredisk-csi-driver/pkg/mounter (cached) coverage: 85.3% of statements
+=== RUN TestIsValidPerfProfile
+=== RUN TestIsValidPerfProfile/none_profile_should_return_true
+=== RUN TestIsValidPerfProfile/incorrect_profile_should_return_false
+=== RUN TestIsValidPerfProfile/default_profile_should_return_true
+=== RUN TestIsValidPerfProfile/advanced_profile_should_return_true
+--- PASS: TestIsValidPerfProfile (0.00s)
+ --- PASS: TestIsValidPerfProfile/none_profile_should_return_true (0.00s)
+ --- PASS: TestIsValidPerfProfile/incorrect_profile_should_return_false (0.00s)
+ --- PASS: TestIsValidPerfProfile/default_profile_should_return_true (0.00s)
+ --- PASS: TestIsValidPerfProfile/advanced_profile_should_return_true (0.00s)
+=== RUN TestGetDiskPerfAttributes
+=== RUN TestGetDiskPerfAttributes/valid_attributes_should_return_all_values
+=== RUN TestGetDiskPerfAttributes/incorrect_profile_should_return_error
+=== RUN TestGetDiskPerfAttributes/No_profile_specified_should_return_none_profile
+--- PASS: TestGetDiskPerfAttributes (0.00s)
+ --- PASS: TestGetDiskPerfAttributes/valid_attributes_should_return_all_values (0.00s)
+ --- PASS: TestGetDiskPerfAttributes/incorrect_profile_should_return_error (0.00s)
+ --- PASS: TestGetDiskPerfAttributes/No_profile_specified_should_return_none_profile (0.00s)
+=== RUN TestIsPerfTuningEnabled
+=== RUN TestIsPerfTuningEnabled/none_profile_should_return_false
+=== RUN TestIsPerfTuningEnabled/default_profile_should_return_true
+=== RUN TestIsPerfTuningEnabled/incorrect_profile_should_return_false
+=== RUN TestIsPerfTuningEnabled/advanced_profile_should_return_true
+--- PASS: TestIsPerfTuningEnabled (0.00s)
+ --- PASS: TestIsPerfTuningEnabled/none_profile_should_return_false (0.00s)
+ --- PASS: TestIsPerfTuningEnabled/default_profile_should_return_true (0.00s)
+ --- PASS: TestIsPerfTuningEnabled/incorrect_profile_should_return_false (0.00s)
+ --- PASS: TestIsPerfTuningEnabled/advanced_profile_should_return_true (0.00s)
+=== RUN TestAccountSupportsPerfOptimization
+=== RUN TestAccountSupportsPerfOptimization/Premium_LRS_supports_optimization
+=== RUN TestAccountSupportsPerfOptimization/StandardSSD_LRS_supports_optimization
+=== RUN TestAccountSupportsPerfOptimization/UltraSSD_LRS_doesn't_supports_optimization
+=== RUN TestAccountSupportsPerfOptimization/Standard_LRS_doesn't_supports_optimization
+=== RUN TestAccountSupportsPerfOptimization/invalid_account_doesn't_supports_optimization
+--- PASS: TestAccountSupportsPerfOptimization (0.00s)
+ --- PASS: TestAccountSupportsPerfOptimization/Premium_LRS_supports_optimization (0.00s)
+ --- PASS: TestAccountSupportsPerfOptimization/StandardSSD_LRS_supports_optimization (0.00s)
+ --- PASS: TestAccountSupportsPerfOptimization/UltraSSD_LRS_doesn't_supports_optimization (0.00s)
+ --- PASS: TestAccountSupportsPerfOptimization/Standard_LRS_doesn't_supports_optimization (0.00s)
+ --- PASS: TestAccountSupportsPerfOptimization/invalid_account_doesn't_supports_optimization (0.00s)
+=== RUN Test_OptimizeDiskPerformance
+=== RUN Test_OptimizeDiskPerformance/could_not_have_queue_dir_for_device_should_return_error
+=== RUN Test_OptimizeDiskPerformance/could_not_have_queue/iosched_dir_for_device_should_return_error
+=== RUN Test_OptimizeDiskPerformance/could_not_have_device_dir_for_device_should_return_error
+=== RUN Test_OptimizeDiskPerformance/valid_device
+--- PASS: Test_OptimizeDiskPerformance (0.04s)
+ --- PASS: Test_OptimizeDiskPerformance/could_not_have_queue_dir_for_device_should_return_error (0.00s)
+ --- PASS: Test_OptimizeDiskPerformance/could_not_have_queue/iosched_dir_for_device_should_return_error (0.02s)
+ --- PASS: Test_OptimizeDiskPerformance/could_not_have_device_dir_for_device_should_return_error (0.01s)
+ --- PASS: Test_OptimizeDiskPerformance/valid_device (0.01s)
+=== RUN Test_getOptimalDeviceSettings
+=== RUN Test_getOptimalDeviceSettings/Should_return_valid_disk_perf_settings
+=== RUN Test_getOptimalDeviceSettings/Should_return_valid_disk_perf_settings_with_no_capability_published_VM
+=== RUN Test_getOptimalDeviceSettings/Should_return_error_if_matching_disk_sku_is_not_found
+--- PASS: Test_getOptimalDeviceSettings (0.00s)
+ --- PASS: Test_getOptimalDeviceSettings/Should_return_valid_disk_perf_settings (0.00s)
+ --- PASS: Test_getOptimalDeviceSettings/Should_return_valid_disk_perf_settings_with_no_capability_published_VM (0.00s)
+ --- PASS: Test_getOptimalDeviceSettings/Should_return_error_if_matching_disk_sku_is_not_found (0.00s)
+=== RUN Test_getDeviceSettingsForBasicProfile
+=== RUN Test_getDeviceSettingsForBasicProfile/Should_return_valid_disk_perf_settings
+--- PASS: Test_getDeviceSettingsForBasicProfile (0.00s)
+ --- PASS: Test_getDeviceSettingsForBasicProfile/Should_return_valid_disk_perf_settings (0.00s)
+=== RUN Test_getDeviceSettingsForAdvancedProfile
+=== RUN Test_getDeviceSettingsForAdvancedProfile/Should_return_valid_disk_perf_settings_if_settings_are_passed
+--- PASS: Test_getDeviceSettingsForAdvancedProfile (0.00s)
+ --- PASS: Test_getDeviceSettingsForAdvancedProfile/Should_return_valid_disk_perf_settings_if_settings_are_passed (0.00s)
+=== RUN Test_applyDeviceSettings
+=== RUN Test_applyDeviceSettings/Should_fail_if_nil_settings_provided
+=== RUN Test_applyDeviceSettings/Should_fail_if_empty_settings_provided
+=== RUN Test_applyDeviceSettings/Should_fail_if_setting_with_non_absolute_path_provided
+=== RUN Test_applyDeviceSettings/Should_fail_if_setting_with_incorrect_prefix_provided
+--- PASS: Test_applyDeviceSettings (0.00s)
+ --- PASS: Test_applyDeviceSettings/Should_fail_if_nil_settings_provided (0.00s)
+ --- PASS: Test_applyDeviceSettings/Should_fail_if_empty_settings_provided (0.00s)
+ --- PASS: Test_applyDeviceSettings/Should_fail_if_setting_with_non_absolute_path_provided (0.00s)
+ --- PASS: Test_applyDeviceSettings/Should_fail_if_setting_with_incorrect_prefix_provided (0.00s)
+=== RUN Test_getMatchingDiskSku
+=== RUN Test_getMatchingDiskSku/Should_get_matching_sku_when_request_is_less_that_sku_size
+=== RUN Test_getMatchingDiskSku/Should_get_smaller_sku_when_multiple_skus_match
+=== RUN Test_getMatchingDiskSku/Should_get_error_if_disk_size_is_invalid
+=== RUN Test_getMatchingDiskSku/Should_get_smatching_sku_if_iops_and_bw_are_not_provided
+=== RUN Test_getMatchingDiskSku/Should_get_error_when_no_skus_are_passed.
+--- PASS: Test_getMatchingDiskSku (0.00s)
+ --- PASS: Test_getMatchingDiskSku/Should_get_matching_sku_when_request_is_less_that_sku_size (0.00s)
+ --- PASS: Test_getMatchingDiskSku/Should_get_smaller_sku_when_multiple_skus_match (0.00s)
+ --- PASS: Test_getMatchingDiskSku/Should_get_error_if_disk_size_is_invalid (0.00s)
+ --- PASS: Test_getMatchingDiskSku/Should_get_smatching_sku_if_iops_and_bw_are_not_provided (0.00s)
+ --- PASS: Test_getMatchingDiskSku/Should_get_error_when_no_skus_are_passed. (0.00s)
+=== RUN Test_meetsRequest
+=== RUN Test_meetsRequest/Sku_should_match_demand_which_is_same_as_limits
+=== RUN Test_meetsRequest/Sku_should_match_demand_which_is_less_than_limits
+=== RUN Test_meetsRequest/Sku_should__not_match_demand_which_is_more_than_limits
+=== RUN Test_meetsRequest/nil_Sku_should_return_false
+--- PASS: Test_meetsRequest (0.00s)
+ --- PASS: Test_meetsRequest/Sku_should_match_demand_which_is_same_as_limits (0.00s)
+ --- PASS: Test_meetsRequest/Sku_should_match_demand_which_is_less_than_limits (0.00s)
+ --- PASS: Test_meetsRequest/Sku_should__not_match_demand_which_is_more_than_limits (0.00s)
+ --- PASS: Test_meetsRequest/nil_Sku_should_return_false (0.00s)
+=== RUN Test_getDeviceName
+=== RUN Test_getDeviceName/return_error_for_invalid_file
+--- PASS: Test_getDeviceName (0.00s)
+ --- PASS: Test_getDeviceName/return_error_for_invalid_file (0.00s)
+=== RUN Test_echoToFile
+=== RUN Test_echoToFile/echo_should_succeed
+--- PASS: Test_echoToFile (0.02s)
+ --- PASS: Test_echoToFile/echo_should_succeed (0.02s)
+=== RUN TestSafeDeviceHelper_DeviceSupportsPerfOptimization
+=== RUN TestSafeDeviceHelper_DeviceSupportsPerfOptimization/invalid_profile_should_return_false
+=== RUN TestSafeDeviceHelper_DeviceSupportsPerfOptimization/ultrassd_lrs_account_should_return_false
+=== RUN TestSafeDeviceHelper_DeviceSupportsPerfOptimization/invalid_account_type_should_return_false
+=== RUN TestSafeDeviceHelper_DeviceSupportsPerfOptimization/none_profile_should_return_false
+=== RUN TestSafeDeviceHelper_DeviceSupportsPerfOptimization/valid_profile_and_account_should_return_true
+=== RUN TestSafeDeviceHelper_DeviceSupportsPerfOptimization/valid_profile_and_account_should_return_true#01
+--- PASS: TestSafeDeviceHelper_DeviceSupportsPerfOptimization (0.00s)
+ --- PASS: TestSafeDeviceHelper_DeviceSupportsPerfOptimization/invalid_profile_should_return_false (0.00s)
+ --- PASS: TestSafeDeviceHelper_DeviceSupportsPerfOptimization/ultrassd_lrs_account_should_return_false (0.00s)
+ --- PASS: TestSafeDeviceHelper_DeviceSupportsPerfOptimization/invalid_account_type_should_return_false (0.00s)
+ --- PASS: TestSafeDeviceHelper_DeviceSupportsPerfOptimization/none_profile_should_return_false (0.00s)
+ --- PASS: TestSafeDeviceHelper_DeviceSupportsPerfOptimization/valid_profile_and_account_should_return_true (0.00s)
+ --- PASS: TestSafeDeviceHelper_DeviceSupportsPerfOptimization/valid_profile_and_account_should_return_true#01 (0.00s)
+=== RUN TestDeviceHelper_OptimizeDiskPerformance
+=== RUN TestDeviceHelper_OptimizeDiskPerformance/nil_node_should_return_error
+=== RUN TestDeviceHelper_OptimizeDiskPerformance/invalid_sku_spec_should_return_error
+=== RUN TestDeviceHelper_OptimizeDiskPerformance/invalid_device_path_should_return_error
+=== RUN TestDeviceHelper_OptimizeDiskPerformance/could_not_set_max_sectors_kb_for_device_should_return_error
+--- PASS: TestDeviceHelper_OptimizeDiskPerformance (0.02s)
+ --- PASS: TestDeviceHelper_OptimizeDiskPerformance/nil_node_should_return_error (0.00s)
+ --- PASS: TestDeviceHelper_OptimizeDiskPerformance/invalid_sku_spec_should_return_error (0.00s)
+ --- PASS: TestDeviceHelper_OptimizeDiskPerformance/invalid_device_path_should_return_error (0.00s)
+ --- PASS: TestDeviceHelper_OptimizeDiskPerformance/could_not_set_max_sectors_kb_for_device_should_return_error (0.00s)
+=== RUN TestDiskSkuInfo_GetLatencyTest
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#01
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#02
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#03
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#04
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#05
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#06
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#07
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#08
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#09
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#10
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#11
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#12
+=== RUN TestDiskSkuInfo_GetLatencyTest/Premium#13
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#01
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#02
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#03
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#04
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#05
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#06
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#07
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#08
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#09
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#10
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#11
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#12
+=== RUN TestDiskSkuInfo_GetLatencyTest/Standard#13
+--- PASS: TestDiskSkuInfo_GetLatencyTest (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#01 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#02 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#03 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#04 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#05 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#06 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#07 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#08 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#09 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#10 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#11 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#12 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Premium#13 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#01 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#02 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#03 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#04 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#05 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#06 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#07 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#08 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#09 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#10 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#11 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#12 (0.00s)
+ --- PASS: TestDiskSkuInfo_GetLatencyTest/Standard#13 (0.00s)
+=== RUN TestNewNodeInfo
+=== RUN TestNewNodeInfo/[Success]_Should_succeed_for_an_existing_node.
+=== RUN TestNewNodeInfo/[Failure]_Should_return_an_error_if_Instances_interface_not_supported_by_cloud_provider.
+=== RUN TestNewNodeInfo/[Failure]_Should_return_an_error_for_a_non-existing_node.
+=== RUN TestNewNodeInfo/[Failure]_Should_return_an_error_for_a_unknown_SKU.
+--- PASS: TestNewNodeInfo (0.00s)
+ --- PASS: TestNewNodeInfo/[Success]_Should_succeed_for_an_existing_node. (0.00s)
+ --- PASS: TestNewNodeInfo/[Failure]_Should_return_an_error_if_Instances_interface_not_supported_by_cloud_provider. (0.00s)
+ --- PASS: TestNewNodeInfo/[Failure]_Should_return_an_error_for_a_non-existing_node. (0.00s)
+ --- PASS: TestNewNodeInfo/[Failure]_Should_return_an_error_for_a_unknown_SKU. (0.00s)
+PASS
+coverage: 95.1% of statements
+ok sigs.k8s.io/azuredisk-csi-driver/pkg/optimization (cached) coverage: 95.1% of statements
+? sigs.k8s.io/azuredisk-csi-driver/pkg/os/filesystem [no test files]
+? sigs.k8s.io/azuredisk-csi-driver/pkg/os/volume [no test files]
+? sigs.k8s.io/azuredisk-csi-driver/pkg/tool [no test files]
+=== RUN TestRoundUpBytes
+--- PASS: TestRoundUpBytes (0.00s)
+=== RUN TestRoundUpGiB
+--- PASS: TestRoundUpGiB (0.00s)
+=== RUN TestBytesToGiB
+--- PASS: TestBytesToGiB (0.00s)
+=== RUN TestGiBToBytes
+--- PASS: TestGiBToBytes (0.00s)
+=== RUN TestConvertTagsToMap
+--- PASS: TestConvertTagsToMap (0.00s)
+=== RUN TestMakeDir
+--- PASS: TestMakeDir (0.00s)
+=== RUN TestMakeFile
+--- PASS: TestMakeFile (0.00s)
+=== RUN TestVolumeLock
+--- PASS: TestVolumeLock (0.00s)
+PASS
+coverage: 93.3% of statements
+ok sigs.k8s.io/azuredisk-csi-driver/pkg/util (cached) coverage: 93.3% of statements
+=== RUN TestCreateAzureCredentialFileOnAzurePublicCloud
+=== RUN TestCreateAzureCredentialFileOnAzurePublicCloud/WithAzureCredentials
+2024/06/18 06:55:42 Running in Prow, converting AZURE_CREDENTIALS to AZURE_CREDENTIAL_FILE
+2024/06/18 06:55:42 Reading credentials file /tmp/azure.toml3803113459
+=== RUN TestCreateAzureCredentialFileOnAzurePublicCloud/WithEnvironmentVariables
+--- PASS: TestCreateAzureCredentialFileOnAzurePublicCloud (0.00s)
+ --- PASS: TestCreateAzureCredentialFileOnAzurePublicCloud/WithAzureCredentials (0.00s)
+ --- PASS: TestCreateAzureCredentialFileOnAzurePublicCloud/WithEnvironmentVariables (0.00s)
+=== RUN TestCreateAzureCredentialFileOnAzureStackCloud
+=== RUN TestCreateAzureCredentialFileOnAzureStackCloud/WithEnvironmentVariables
+--- PASS: TestCreateAzureCredentialFileOnAzureStackCloud (0.00s)
+ --- PASS: TestCreateAzureCredentialFileOnAzureStackCloud/WithEnvironmentVariables (0.00s)
+PASS
+coverage: 78.0% of statements
+ok sigs.k8s.io/azuredisk-csi-driver/test/utils/credentials (cached) coverage: 78.0% of statements
+FAIL
diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes
new file mode 100644
index 0000000000..94f480de94
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitattributes
@@ -0,0 +1 @@
+* text=auto eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
index b883f1fdc6..815e20660e 100644
--- a/vendor/github.com/Microsoft/go-winio/.gitignore
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -1 +1,10 @@
+.vscode/
+
*.exe
+
+# testing
+testdata
+
+# go workspaces
+go.work
+go.work.sum
diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml
new file mode 100644
index 0000000000..af403bb13a
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml
@@ -0,0 +1,144 @@
+run:
+ skip-dirs:
+ - pkg/etw/sample
+
+linters:
+ enable:
+ # style
+ - containedctx # struct contains a context
+ - dupl # duplicate code
+ - errname # errors are named correctly
+ - goconst # strings that should be constants
+ - godot # comments end in a period
+ - misspell
+ - nolintlint # "//nolint" directives are properly explained
+ - revive # golint replacement
+ - stylecheck # golint replacement, less configurable than revive
+ - unconvert # unnecessary conversions
+ - wastedassign
+
+ # bugs, performance, unused, etc ...
+ - contextcheck # function uses a non-inherited context
+ - errorlint # errors not wrapped for 1.13
+ - exhaustive # check exhaustiveness of enum switch statements
+ - gofmt # files are gofmt'ed
+ - gosec # security
+ - nestif # deeply nested ifs
+ - nilerr # returns nil even with non-nil error
+ - prealloc # slices that can be pre-allocated
+ - structcheck # unused struct fields
+ - unparam # unused function params
+
+issues:
+ exclude-rules:
+ # err is very often shadowed in nested scopes
+ - linters:
+ - govet
+ text: '^shadow: declaration of "err" shadows declaration'
+
+ # ignore long lines for skip autogen directives
+ - linters:
+ - revive
+ text: "^line-length-limit: "
+ source: "^//(go:generate|sys) "
+
+ # allow unjustified ignores of error checks in defer statements
+ - linters:
+ - nolintlint
+ text: "^directive `//nolint:errcheck` should provide explanation"
+ source: '^\s*defer '
+
+ # allow unjustified ignores of error lints for io.EOF
+ - linters:
+ - nolintlint
+ text: "^directive `//nolint:errorlint` should provide explanation"
+ source: '[=|!]= io.EOF'
+
+
+linters-settings:
+ govet:
+ enable-all: true
+ disable:
+ # struct order is often for Win32 compat
+ # also, ignore pointer bytes/GC issues for now until performance becomes an issue
+ - fieldalignment
+ check-shadowing: true
+ nolintlint:
+ allow-leading-space: false
+ require-explanation: true
+ require-specific: true
+ revive:
+ # revive is more configurable than static check, so likely the preferred alternative to static-check
+ # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
+ enable-all-rules:
+ true
+ # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+ rules:
+ # rules with required arguments
+ - name: argument-limit
+ disabled: true
+ - name: banned-characters
+ disabled: true
+ - name: cognitive-complexity
+ disabled: true
+ - name: cyclomatic
+ disabled: true
+ - name: file-header
+ disabled: true
+ - name: function-length
+ disabled: true
+ - name: function-result-limit
+ disabled: true
+ - name: max-public-structs
+ disabled: true
+ # generally annoying rules
+ - name: add-constant # complains about any and all strings and integers
+ disabled: true
+ - name: confusing-naming # we frequently use "Foo()" and "foo()" together
+ disabled: true
+ - name: flag-parameter # excessive, and a common idiom we use
+ disabled: true
+ # general config
+ - name: line-length-limit
+ arguments:
+ - 140
+ - name: var-naming
+ arguments:
+ - []
+ - - CID
+ - CRI
+ - CTRD
+ - DACL
+ - DLL
+ - DOS
+ - ETW
+ - FSCTL
+ - GCS
+ - GMSA
+ - HCS
+ - HV
+ - IO
+ - LCOW
+ - LDAP
+ - LPAC
+ - LTSC
+ - MMIO
+ - NT
+ - OCI
+ - PMEM
+ - PWSH
+ - RX
+ - SACl
+ - SID
+ - SMB
+ - TX
+ - VHD
+ - VHDX
+ - VMID
+ - VPCI
+ - WCOW
+ - WIM
+ stylecheck:
+ checks:
+ - "all"
+ - "-ST1003" # use revive's var naming
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
index 5680010575..7474b4f0b6 100644
--- a/vendor/github.com/Microsoft/go-winio/README.md
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -1,4 +1,4 @@
-# go-winio
+# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
@@ -11,12 +11,79 @@ package.
Please see the LICENSE file for licensing information.
-This project has adopted the [Microsoft Open Source Code of
-Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
-see the [Code of Conduct
-FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
-[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
-questions or comments.
+## Contributing
-Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
-for another named pipe implementation.
+This project welcomes contributions and suggestions.
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
+you have the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [Microsoft CLA](https://cla.microsoft.com).
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to
+provide a CLA and decorate the PR appropriately (e.g., label, comment).
+Simply follow the instructions provided by the bot.
+You will only need to do this once across all repos using our CLA.
+
+Additionally, the pull request pipeline requires the following steps to be performed before
+merging.
+
+### Code Sign-Off
+
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+The linting settings are stored in [`.golangci.yml`](./.golangci.yml), and can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+ "go.lintTool": "golangci-lint",
+ "go.lintOnSave": "package",
+```
+
+Additional editor [integrations options are also available][lint-ide].
+
+Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run ./...
+```
+
+### Go Generate
+
+The pipeline checks that auto-generated code, via `go generate`, is up to date.
+
+This can be done for the entire repo:
+
+```shell
+> go generate ./...
+```
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Special Thanks
+
+Thanks to [natefinch][natefinch] for the inspiration for this library.
+See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
+
+[lint]: https://golangci-lint.run/
+[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
+[lint-install]: https://golangci-lint.run/usage/install/#local-installation
+
+[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
+[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
+
+[natefinch]: https://github.com/natefinch
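To make the sign-off step described in the README hunk above concrete, here is a minimal shell sketch (an editorial illustration, not part of the vendored file; the commit message and commit count are hypothetical):

```shell
# sign off the commit being created
> git commit --signoff -m "vendor: update go-winio"

# retroactively sign off the last three commits
> git rebase --signoff HEAD~3
```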
diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md
new file mode 100644
index 0000000000..869fdfe2b2
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go
index 2be34af431..09621c8846 100644
--- a/vendor/github.com/Microsoft/go-winio/backup.go
+++ b/vendor/github.com/Microsoft/go-winio/backup.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -7,11 +8,12 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"runtime"
"syscall"
"unicode/utf16"
+
+ "golang.org/x/sys/windows"
)
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
@@ -24,7 +26,7 @@ const (
BackupAlternateData
BackupLink
BackupPropertyData
- BackupObjectId
+ BackupObjectId //revive:disable-line:var-naming ID, not Id
BackupReparseData
BackupSparseBlock
BackupTxfsData
@@ -34,14 +36,16 @@ const (
StreamSparseAttributes = uint32(8)
)
+//nolint:revive // var-naming: ALL_CAPS
const (
- WRITE_DAC = 0x40000
- WRITE_OWNER = 0x80000
- ACCESS_SYSTEM_SECURITY = 0x1000000
+ WRITE_DAC = windows.WRITE_DAC
+ WRITE_OWNER = windows.WRITE_OWNER
+ ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
)
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
+ //revive:disable-next-line:var-naming ID, not Id
Id uint32 // The backup stream ID
Attributes uint32 // Stream attributes
Size int64 // The size of the stream in bytes
@@ -49,8 +53,8 @@ type BackupHeader struct {
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
}
-type win32StreamId struct {
- StreamId uint32
+type win32StreamID struct {
+ StreamID uint32
Attributes uint32
Size uint64
NameSize uint32
@@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
- if r.bytesLeft > 0 {
+ if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
if s, ok := r.r.(io.Seeker); ok {
// Make sure Seek on io.SeekCurrent sometimes succeeds
// before trying the actual seek.
@@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
r.bytesLeft = 0
}
}
- if _, err := io.Copy(ioutil.Discard, r); err != nil {
+ if _, err := io.Copy(io.Discard, r); err != nil {
return nil, err
}
}
- var wsi win32StreamId
+ var wsi win32StreamID
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
return nil, err
}
hdr := &BackupHeader{
- Id: wsi.StreamId,
+ Id: wsi.StreamID,
Attributes: wsi.Attributes,
Size: int64(wsi.Size),
}
@@ -102,7 +106,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) {
}
hdr.Name = syscall.UTF16ToString(name)
}
- if wsi.StreamId == BackupSparseBlock {
+ if wsi.StreamID == BackupSparseBlock {
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
return nil, err
}
@@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
return fmt.Errorf("missing %d bytes", w.bytesLeft)
}
name := utf16.Encode([]rune(hdr.Name))
- wsi := win32StreamId{
- StreamId: hdr.Id,
+ wsi := win32StreamID{
+ StreamID: hdr.Id,
Attributes: hdr.Attributes,
Size: uint64(hdr.Size),
NameSize: uint32(len(name) * 2),
@@ -203,7 +207,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
if err != nil {
- return 0, &os.PathError{"BackupRead", r.f.Name(), err}
+ return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
}
runtime.KeepAlive(r.f)
if bytesRead == 0 {
@@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) {
// the underlying file.
func (r *BackupFileReader) Close() error {
if r.ctx != 0 {
- backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+ _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
runtime.KeepAlive(r.f)
r.ctx = 0
}
@@ -242,7 +246,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
var bytesWritten uint32
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
if err != nil {
- return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
+ return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
}
runtime.KeepAlive(w.f)
if int(bytesWritten) != len(b) {
@@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) {
// close the underlying file.
func (w *BackupFileWriter) Close() error {
if w.ctx != 0 {
- backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+ _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
runtime.KeepAlive(w.f)
w.ctx = 0
}
@@ -271,7 +275,13 @@ func OpenForBackup(path string, access uint32, share uint32, createmode uint32)
if err != nil {
return nil, err
}
- h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
+ h, err := syscall.CreateFile(&winPath[0],
+ access,
+ share,
+ nil,
+ createmode,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT,
+ 0)
if err != nil {
err = &os.PathError{Op: "open", Path: path, Err: err}
return nil, err
diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go
new file mode 100644
index 0000000000..1f5bfe2d54
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/doc.go
@@ -0,0 +1,22 @@
+// This package provides utilities for efficiently performing Win32 IO operations in Go.
+// Currently, this package provides support for general IO and management of
+// - named pipes
+// - files
+// - [Hyper-V sockets]
+//
+// This code is similar to Go's [net] package, and uses IO completion ports to avoid
+// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
+//
+// This limits support to Windows Vista and newer operating systems.
+//
+// Additionally, this package provides support for:
+// - creating and managing GUIDs
+// - writing to [ETW]
+// - opening and managing VHDs
+// - parsing [Windows Image files]
+// - auto-generating Win32 API code
+//
+// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
+// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
+// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
+package winio
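The doc.go comment above describes the named-pipe support in general terms; the following is a minimal Go sketch of how a consumer typically uses it (assuming the long-standing `winio.ListenPipe`/`winio.DialPipe` signatures; the pipe name is hypothetical and this snippet is not part of the vendored file):

```go
//go:build windows

package main

import (
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	pipePath := `\\.\pipe\winio-demo` // hypothetical pipe name

	// Server side: ListenPipe returns a net.Listener backed by IO completion ports.
	l, err := winio.ListenPipe(pipePath, nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	go func() {
		conn, err := l.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		fmt.Fprintln(conn, "hello from the pipe server")
	}()

	// Client side: DialPipe returns a net.Conn; the timeout argument is optional.
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(pipePath, &timeout)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	n, _ := conn.Read(buf)
	fmt.Print(string(buf[:n]))
}
```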
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go
index 4051c1b33b..e104dbdfdf 100644
--- a/vendor/github.com/Microsoft/go-winio/ea.go
+++ b/vendor/github.com/Microsoft/go-winio/ea.go
@@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
- return
+ return ea, nb, err
}
nameOffset := fileFullEaInformationSize
@@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
- return
+ return ea, nb, err
}
ea.Name = string(b[nameOffset : nameOffset+nameLen])
@@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
- return
+ return ea, nb, err
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
@@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
eas = append(eas, ea)
b = nb
}
- return
+ return eas, err
}
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go
index 0385e41081..175a99d3f4 100644
--- a/vendor/github.com/Microsoft/go-winio/file.go
+++ b/vendor/github.com/Microsoft/go-winio/file.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -10,6 +11,8 @@ import (
"sync/atomic"
"syscall"
"time"
+
+ "golang.org/x/sys/windows"
)
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
@@ -23,6 +26,8 @@ type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
+
+//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg
func (b *atomicBool) swap(new bool) bool {
var newInt int32
if new {
@@ -31,11 +36,6 @@ func (b *atomicBool) swap(new bool) bool {
return atomic.SwapInt32((*int32)(b), newInt) == 1
}
-const (
- cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
- cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
-)
-
var (
ErrFileClosed = errors.New("file has already been closed")
ErrTimeout = &timeoutError{}
@@ -43,28 +43,28 @@ var (
type timeoutError struct{}
-func (e *timeoutError) Error() string { return "i/o timeout" }
-func (e *timeoutError) Timeout() bool { return true }
-func (e *timeoutError) Temporary() bool { return true }
+func (*timeoutError) Error() string { return "i/o timeout" }
+func (*timeoutError) Timeout() bool { return true }
+func (*timeoutError) Temporary() bool { return true }
type timeoutChan chan struct{}
var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle
-// ioResult contains the result of an asynchronous IO operation
+// ioResult contains the result of an asynchronous IO operation.
type ioResult struct {
bytes uint32
err error
}
-// ioOperation represents an outstanding asynchronous Win32 IO
+// ioOperation represents an outstanding asynchronous Win32 IO.
type ioOperation struct {
o syscall.Overlapped
ch chan ioResult
}
-func initIo() {
+func initIO() {
h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
if err != nil {
panic(err)
@@ -93,15 +93,15 @@ type deadlineHandler struct {
timedout atomicBool
}
-// makeWin32File makes a new win32File from an existing file handle
+// makeWin32File makes a new win32File from an existing file handle.
func makeWin32File(h syscall.Handle) (*win32File, error) {
f := &win32File{handle: h}
- ioInitOnce.Do(initIo)
+ ioInitOnce.Do(initIO)
_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
if err != nil {
return nil, err
}
- err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
+ err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
if err != nil {
return nil, err
}
@@ -120,14 +120,14 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return f, nil
}
-// closeHandle closes the resources associated with a Win32 handle
+// closeHandle closes the resources associated with a Win32 handle.
func (f *win32File) closeHandle() {
f.wgLock.Lock()
// Atomically set that we are closing, releasing the resources only once.
if !f.closing.swap(true) {
f.wgLock.Unlock()
// cancel all IO and wait for it to complete
- cancelIoEx(f.handle, nil)
+ _ = cancelIoEx(f.handle, nil)
f.wg.Wait()
// at this point, no new IO can start
syscall.Close(f.handle)
@@ -143,9 +143,14 @@ func (f *win32File) Close() error {
return nil
}
-// prepareIo prepares for a new IO operation.
+// IsClosed checks if the file has been closed.
+func (f *win32File) IsClosed() bool {
+ return f.closing.isSet()
+}
+
+// prepareIO prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
-func (f *win32File) prepareIo() (*ioOperation, error) {
+func (f *win32File) prepareIO() (*ioOperation, error) {
f.wgLock.RLock()
if f.closing.isSet() {
f.wgLock.RUnlock()
@@ -158,7 +163,7 @@ func (f *win32File) prepareIo() (*ioOperation, error) {
return c, nil
}
-// ioCompletionProcessor processes completed async IOs forever
+// ioCompletionProcessor processes completed async IOs forever.
func ioCompletionProcessor(h syscall.Handle) {
for {
var bytes uint32
@@ -172,15 +177,17 @@ func ioCompletionProcessor(h syscall.Handle) {
}
}
-// asyncIo processes the return value from ReadFile or WriteFile, blocking until
+// todo: helsaawy - create an asyncIO version that takes a context
+
+// asyncIO processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
-func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
- if err != syscall.ERROR_IO_PENDING {
+func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
+ if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
return int(bytes), err
}
if f.closing.isSet() {
- cancelIoEx(f.handle, &c.o)
+ _ = cancelIoEx(f.handle, &c.o)
}
var timeout timeoutChan
@@ -194,7 +201,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
select {
case r = <-c.ch:
err = r.err
- if err == syscall.ERROR_OPERATION_ABORTED {
+ if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
if f.closing.isSet() {
err = ErrFileClosed
}
@@ -204,10 +211,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
}
case <-timeout:
- cancelIoEx(f.handle, &c.o)
+ _ = cancelIoEx(f.handle, &c.o)
r = <-c.ch
err = r.err
- if err == syscall.ERROR_OPERATION_ABORTED {
+ if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
err = ErrTimeout
}
}
@@ -215,13 +222,14 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
// runtime.KeepAlive is needed, as c is passed via native
// code to ioCompletionProcessor, c must remain alive
// until the channel read is complete.
+ // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
runtime.KeepAlive(c)
return int(r.bytes), err
}
// Read reads from a file handle.
func (f *win32File) Read(b []byte) (int, error) {
- c, err := f.prepareIo()
+ c, err := f.prepareIO()
if err != nil {
return 0, err
}
@@ -233,13 +241,13 @@ func (f *win32File) Read(b []byte) (int, error) {
var bytes uint32
err = syscall.ReadFile(f.handle, b, &bytes, &c.o)
- n, err := f.asyncIo(c, &f.readDeadline, bytes, err)
+ n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
runtime.KeepAlive(b)
// Handle EOF conditions.
if err == nil && n == 0 && len(b) != 0 {
return 0, io.EOF
- } else if err == syscall.ERROR_BROKEN_PIPE {
+ } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
return 0, io.EOF
} else {
return n, err
@@ -248,7 +256,7 @@ func (f *win32File) Read(b []byte) (int, error) {
// Write writes to a file handle.
func (f *win32File) Write(b []byte) (int, error) {
- c, err := f.prepareIo()
+ c, err := f.prepareIO()
if err != nil {
return 0, err
}
@@ -260,7 +268,7 @@ func (f *win32File) Write(b []byte) (int, error) {
var bytes uint32
err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
- n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
+ n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
runtime.KeepAlive(b)
return n, err
}
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
index 3ab6bff69c..702950e72a 100644
--- a/vendor/github.com/Microsoft/go-winio/fileinfo.go
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -14,13 +15,18 @@ import (
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
FileAttributes uint32
- pad uint32 // padding
+ _ uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
bi := &FileBasicInfo{}
- if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(bi)),
+ uint32(unsafe.Sizeof(*bi)),
+ ); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -29,7 +35,12 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
- if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+ if err := windows.SetFileInformationByHandle(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(bi)),
+ uint32(unsafe.Sizeof(*bi)),
+ ); err != nil {
return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -48,7 +59,10 @@ type FileStandardInfo struct {
 // GetFileStandardInfo retrieves standard information for the file.
func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
si := &FileStandardInfo{}
- if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
+ if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
+ windows.FileStandardInfo,
+ (*byte)(unsafe.Pointer(si)),
+ uint32(unsafe.Sizeof(*si))); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
@@ -65,7 +79,12 @@ type FileIDInfo struct {
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
fileID := &FileIDInfo{}
- if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileIdInfo,
+ (*byte)(unsafe.Pointer(fileID)),
+ uint32(unsafe.Sizeof(*fileID)),
+ ); err != nil {
return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
}
runtime.KeepAlive(f)
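A short sketch (outside the patch) of the fileinfo helpers above; the file path is a placeholder and error handling is abbreviated.

package main

import (
	"fmt"
	"os"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\Windows\notepad.exe`) // placeholder path
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	// Times and attributes via GetFileInformationByHandleEx(FileBasicInfo).
	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		fmt.Println("basic info:", err)
		return
	}
	fmt.Printf("attributes: %#x\n", bi.FileAttributes)

	// The (volume serial, file ID) pair uniquely identifies the file on the volume.
	id, err := winio.GetFileID(f)
	if err != nil {
		fmt.Println("file id:", err)
		return
	}
	fmt.Printf("file id: %+v\n", *id)
}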
diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go
index b632f8f8bb..52f1c280f6 100644
--- a/vendor/github.com/Microsoft/go-winio/hvsock.go
+++ b/vendor/github.com/Microsoft/go-winio/hvsock.go
@@ -1,8 +1,11 @@
+//go:build windows
// +build windows
package winio
import (
+ "context"
+ "errors"
"fmt"
"io"
"net"
@@ -11,16 +14,87 @@ import (
"time"
"unsafe"
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/socket"
"github.com/Microsoft/go-winio/pkg/guid"
)
-//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+const afHVSock = 34 // AF_HYPERV
-const (
- afHvSock = 34 // AF_HYPERV
+// Well-known Service and VM IDs
+// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
- socketError = ^uintptr(0)
-)
+// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
+func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
+ return guid.GUID{}
+}
+
+// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
+func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff
+ return guid.GUID{
+ Data1: 0xffffffff,
+ Data2: 0xffff,
+ Data3: 0xffff,
+ Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ }
+}
+
+// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
+func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
+ return guid.GUID{
+ Data1: 0xe0e16197,
+ Data2: 0xdd56,
+ Data3: 0x4a10,
+ Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
+ }
+}
+
+// HvsockGUIDSiloHost is the address of a silo's host partition:
+// - The silo host of a hosted silo is the utility VM.
+// - The silo host of a silo on a physical host is the physical host.
+func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
+ return guid.GUID{
+ Data1: 0x36bd0c5c,
+ Data2: 0x7276,
+ Data3: 0x4223,
+ Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
+ }
+}
+
+// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
+func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
+ return guid.GUID{
+ Data1: 0x90db8b89,
+ Data2: 0xd35,
+ Data3: 0x4f79,
+ Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
+ }
+}
+
+// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
+// Listening on this VmId accepts connection from:
+// - Inside silos: silo host partition.
+// - Inside hosted silo: host of the VM.
+// - Inside VM: VM host.
+// - Physical host: Not supported.
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
+ return guid.GUID{
+ Data1: 0xa42e7cda,
+ Data2: 0xd03f,
+ Data3: 0x480c,
+ Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
+ }
+}
+
+// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
+func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
+ return guid.GUID{
+ Data2: 0xfacb,
+ Data3: 0x11e6,
+ Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
+ }
+}
// An HvsockAddr is an address for a AF_HYPERV socket.
type HvsockAddr struct {
@@ -35,8 +109,10 @@ type rawHvsockAddr struct {
ServiceID guid.GUID
}
+var _ socket.RawSockaddr = &rawHvsockAddr{}
+
// Network returns the address's network name, "hvsock".
-func (addr *HvsockAddr) Network() string {
+func (*HvsockAddr) Network() string {
return "hvsock"
}
@@ -46,14 +122,14 @@ func (addr *HvsockAddr) String() string {
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
- g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
+ g := hvsockVsockServiceTemplate() // make a copy
g.Data1 = port
return g
}
func (addr *HvsockAddr) raw() rawHvsockAddr {
return rawHvsockAddr{
- Family: afHvSock,
+ Family: afHVSock,
VMID: addr.VMID,
ServiceID: addr.ServiceID,
}
@@ -64,20 +140,48 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
addr.ServiceID = raw.ServiceID
}
+// Sockaddr returns a pointer to and the size of this struct.
+//
+// Implements the [socket.RawSockaddr] interface, and allows use in
+// [socket.Bind] and [socket.ConnectEx].
+func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
+ return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
+}
+
+// FromBytes populates the address from a raw byte buffer, allowing use with [socket.Bind] and [socket.ConnectEx].
+func (r *rawHvsockAddr) FromBytes(b []byte) error {
+ n := int(unsafe.Sizeof(rawHvsockAddr{}))
+
+ if len(b) < n {
+ return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
+ }
+
+ copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
+ if r.Family != afHVSock {
+ return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
+ }
+
+ return nil
+}
+
// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
sock *win32File
addr HvsockAddr
}
+var _ net.Listener = &HvsockListener{}
+
// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
sock *win32File
local, remote HvsockAddr
}
-func newHvSocket() (*win32File, error) {
- fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
+var _ net.Conn = &HvsockConn{}
+
+func newHVSocket() (*win32File, error) {
+ fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1)
if err != nil {
return nil, os.NewSyscallError("socket", err)
}
@@ -93,12 +197,12 @@ func newHvSocket() (*win32File, error) {
// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
l := &HvsockListener{addr: *addr}
- sock, err := newHvSocket()
+ sock, err := newHVSocket()
if err != nil {
return nil, l.opErr("listen", err)
}
sa := addr.raw()
- err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
+ err = socket.Bind(windows.Handle(sock.handle), &sa)
if err != nil {
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
}
@@ -120,7 +224,7 @@ func (l *HvsockListener) Addr() net.Addr {
// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
- sock, err := newHvSocket()
+ sock, err := newHVSocket()
if err != nil {
return nil, l.opErr("accept", err)
}
@@ -129,27 +233,42 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
sock.Close()
}
}()
- c, err := l.sock.prepareIo()
+ c, err := l.sock.prepareIO()
if err != nil {
return nil, l.opErr("accept", err)
}
defer l.sock.wg.Done()
// AcceptEx, per documentation, requires an extra 16 bytes per address.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
var addrbuf [addrlen * 2]byte
var bytes uint32
- err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
- _, err = l.sock.asyncIo(c, nil, bytes, err)
- if err != nil {
+ err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o)
+ if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
}
+
conn := &HvsockConn{
sock: sock,
}
+ // The local address returned in the AcceptEx buffer is the same as the Listener socket's
+	// address. However, the service GUID reported by GetSockName is different from the Listener's
+	// socket, and is sometimes the same as the local address of the socket that dialed the
+	// address, with the service GUID.Data1 incremented, but other times is different.
+ // todo: does the local address matter? is the listener's address or the actual address appropriate?
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
+
+ // initialize the accepted socket and update its properties with those of the listening socket
+ if err = windows.Setsockopt(windows.Handle(sock.handle),
+ windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
+ (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
+ return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
+ }
+
sock = nil
return conn, nil
}
@@ -159,43 +278,171 @@ func (l *HvsockListener) Close() error {
return l.sock.Close()
}
-/* Need to finish ConnectEx handling
-func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
- sock, err := newHvSocket()
+// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]).
+type HvsockDialer struct {
+ // Deadline is the time the Dial operation must connect before erroring.
+ Deadline time.Time
+
+ // Retries is the number of additional connects to try if the connection times out, is refused,
+ // or the host is unreachable
+ Retries uint
+
+ // RetryWait is the time to wait after a connection error to retry
+ RetryWait time.Duration
+
+ rt *time.Timer // redial wait timer
+}
+
+// Dial the Hyper-V socket at addr.
+//
+// See [HvsockDialer.Dial] for more information.
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+ return (&HvsockDialer{}).Dial(ctx, addr)
+}
+
+// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
+// It will retry up to (HvsockDialer).Retries additional times if dialing fails, waiting
+// (HvsockDialer).RetryWait between attempts.
+//
+// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
+func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+ op := "dial"
+ // create the conn early to use opErr()
+ conn = &HvsockConn{
+ remote: *addr,
+ }
+
+ if !d.Deadline.IsZero() {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithDeadline(ctx, d.Deadline)
+ defer cancel()
+ }
+
+ // preemptive timeout/cancellation check
+ if err = ctx.Err(); err != nil {
+ return nil, conn.opErr(op, err)
+ }
+
+ sock, err := newHVSocket()
if err != nil {
- return nil, err
+ return nil, conn.opErr(op, err)
}
defer func() {
if sock != nil {
sock.Close()
}
}()
- c, err := sock.prepareIo()
+
+ sa := addr.raw()
+ err = socket.Bind(windows.Handle(sock.handle), &sa)
if err != nil {
- return nil, err
+ return nil, conn.opErr(op, os.NewSyscallError("bind", err))
+ }
+
+ c, err := sock.prepareIO()
+ if err != nil {
+ return nil, conn.opErr(op, err)
}
defer sock.wg.Done()
var bytes uint32
- err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
- _, err = sock.asyncIo(ctx, c, nil, bytes, err)
+ for i := uint(0); i <= d.Retries; i++ {
+ err = socket.ConnectEx(
+ windows.Handle(sock.handle),
+ &sa,
+ nil, // sendBuf
+ 0, // sendDataLen
+ &bytes,
+ (*windows.Overlapped)(unsafe.Pointer(&c.o)))
+ _, err = sock.asyncIO(c, nil, bytes, err)
+ if i < d.Retries && canRedial(err) {
+ if err = d.redialWait(ctx); err == nil {
+ continue
+ }
+ }
+ break
+ }
if err != nil {
- return nil, err
+ return nil, conn.opErr(op, os.NewSyscallError("connectex", err))
}
- conn := &HvsockConn{
- sock: sock,
- remote: *addr,
+
+ // update the connection properties, so shutdown can be used
+ if err = windows.Setsockopt(
+ windows.Handle(sock.handle),
+ windows.SOL_SOCKET,
+ windows.SO_UPDATE_CONNECT_CONTEXT,
+ nil, // optvalue
+ 0, // optlen
+ ); err != nil {
+ return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err))
+ }
+
+ // get the local name
+ var sal rawHvsockAddr
+ err = socket.GetSockName(windows.Handle(sock.handle), &sal)
+ if err != nil {
+ return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
+ }
+ conn.local.fromRaw(&sal)
+
+ // one last check for timeout, since asyncIO doesn't check the context
+ if err = ctx.Err(); err != nil {
+ return nil, conn.opErr(op, err)
}
+
+ conn.sock = sock
sock = nil
+
return conn, nil
}
-*/
+
+// redialWait waits before attempting to redial, resetting the timer as appropriate.
+func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
+ if d.RetryWait == 0 {
+ return nil
+ }
+
+ if d.rt == nil {
+ d.rt = time.NewTimer(d.RetryWait)
+ } else {
+ // should already be stopped and drained
+ d.rt.Reset(d.RetryWait)
+ }
+
+ select {
+ case <-ctx.Done():
+ case <-d.rt.C:
+ return nil
+ }
+
+ // stop and drain the timer
+ if !d.rt.Stop() {
+ <-d.rt.C
+ }
+ return ctx.Err()
+}
+
+// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall.
+func canRedial(err error) bool {
+ //nolint:errorlint // guaranteed to be an Errno
+ switch err {
+ case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT,
+ windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL:
+ return true
+ default:
+ return false
+ }
+}
func (conn *HvsockConn) opErr(op string, err error) error {
+ // translate from "file closed" to "socket closed"
+ if errors.Is(err, ErrFileClosed) {
+ err = socket.ErrSocketClosed
+ }
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}
func (conn *HvsockConn) Read(b []byte) (int, error) {
- c, err := conn.sock.prepareIo()
+ c, err := conn.sock.prepareIO()
if err != nil {
return 0, conn.opErr("read", err)
}
@@ -203,10 +450,11 @@ func (conn *HvsockConn) Read(b []byte) (int, error) {
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var flags, bytes uint32
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
- n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
+ n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
if err != nil {
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsarecv", err)
+ var eno windows.Errno
+ if errors.As(err, &eno) {
+ err = os.NewSyscallError("wsarecv", eno)
}
return 0, conn.opErr("read", err)
} else if n == 0 {
@@ -229,7 +477,7 @@ func (conn *HvsockConn) Write(b []byte) (int, error) {
}
func (conn *HvsockConn) write(b []byte) (int, error) {
- c, err := conn.sock.prepareIo()
+ c, err := conn.sock.prepareIO()
if err != nil {
return 0, conn.opErr("write", err)
}
@@ -237,10 +485,11 @@ func (conn *HvsockConn) write(b []byte) (int, error) {
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var bytes uint32
err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
- n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
+ n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
if err != nil {
- if _, ok := err.(syscall.Errno); ok {
- err = os.NewSyscallError("wsasend", err)
+ var eno windows.Errno
+ if errors.As(err, &eno) {
+ err = os.NewSyscallError("wsasend", eno)
}
return 0, conn.opErr("write", err)
}
@@ -252,29 +501,43 @@ func (conn *HvsockConn) Close() error {
return conn.sock.Close()
}
+func (conn *HvsockConn) IsClosed() bool {
+ return conn.sock.IsClosed()
+}
+
+// shutdown disables sending or receiving on a socket.
func (conn *HvsockConn) shutdown(how int) error {
- err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
+ if conn.IsClosed() {
+ return socket.ErrSocketClosed
+ }
+
+ err := syscall.Shutdown(conn.sock.handle, how)
if err != nil {
+ // If the connection was closed, shutdowns fail with "not connected"
+ if errors.Is(err, windows.WSAENOTCONN) ||
+ errors.Is(err, windows.WSAESHUTDOWN) {
+ err = socket.ErrSocketClosed
+ }
return os.NewSyscallError("shutdown", err)
}
return nil
}
-// CloseRead shuts down the read end of the socket.
+// CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD)
if err != nil {
- return conn.opErr("close", err)
+ return conn.opErr("closeread", err)
}
return nil
}
-// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
-// no more data will be written.
+// CloseWrite shuts down the write end of the socket, preventing future write operations and
+// notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR)
if err != nil {
- return conn.opErr("close", err)
+ return conn.opErr("closewrite", err)
}
return nil
}
@@ -291,8 +554,13 @@ func (conn *HvsockConn) RemoteAddr() net.Addr {
// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
- conn.SetReadDeadline(t)
- conn.SetWriteDeadline(t)
+ // todo: implement `SetDeadline` for `win32File`
+ if err := conn.SetReadDeadline(t); err != nil {
+ return fmt.Errorf("set read deadline: %w", err)
+ }
+ if err := conn.SetWriteDeadline(t); err != nil {
+ return fmt.Errorf("set write deadline: %w", err)
+ }
return nil
}
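A hedged sketch (not part of the vendored files) of dialing a Hyper-V socket with the new HvsockDialer shown above; the VSOCK port and retry settings are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	addr := &winio.HvsockAddr{
		VMID:      winio.HvsockGUIDLoopback(), // connect within the same partition
		ServiceID: winio.VsockServiceID(5000), // VSOCK-style service GUID for port 5000
	}

	d := &winio.HvsockDialer{
		Deadline:  time.Now().Add(5 * time.Second),
		Retries:   3,
		RetryWait: time.Second,
	}

	conn, err := d.Dial(context.Background(), addr)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}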
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
new file mode 100644
index 0000000000..7e82f9afa9
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
@@ -0,0 +1,20 @@
+package socket
+
+import (
+ "unsafe"
+)
+
+// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
+// struct must meet the Win32 sockaddr requirements specified here:
+// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
+//
+// Specifically, the struct size must be at least as large as an int16 (unsigned short)
+// for the address family.
+type RawSockaddr interface {
+ // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
+ // for the RawSockaddr's data to be overwritten by syscalls (if necessary).
+ //
+	// It is the caller's responsibility to validate that the values are valid; invalid
+ // pointers or size can cause a panic.
+ Sockaddr() (unsafe.Pointer, int32, error)
+}
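A minimal sketch (not part of the patch) of a type that satisfies the RawSockaddr contract above, using a hypothetical IPv4 sockaddr; internal/socket cannot be imported from outside go-winio, so this only illustrates the expected shape.

package sockaddrexample

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// rawSockaddrInet4 mirrors the Win32 SOCKADDR_IN layout: the address family
// comes first, followed by the port and address in network byte order.
type rawSockaddrInet4 struct {
	Family uint16
	Port   uint16
	Addr   [4]byte
	Zero   [8]byte
}

// Sockaddr hands syscalls a pointer to the struct and its size so the data
// can be read or overwritten in place, as the RawSockaddr doc requires.
func (r *rawSockaddrInet4) Sockaddr() (unsafe.Pointer, int32, error) {
	r.Family = uint16(windows.AF_INET)
	return unsafe.Pointer(r), int32(unsafe.Sizeof(*r)), nil
}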
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
new file mode 100644
index 0000000000..39e8c05f8f
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
@@ -0,0 +1,179 @@
+//go:build windows
+
+package socket
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/Microsoft/go-winio/pkg/guid"
+ "golang.org/x/sys/windows"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
+
+//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
+//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
+//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+
+const socketError = uintptr(^uint32(0))
+
+var (
+ // todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
+
+ ErrBufferSize = errors.New("buffer size")
+ ErrAddrFamily = errors.New("address family")
+ ErrInvalidPointer = errors.New("invalid pointer")
+ ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed)
+)
+
+// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
+
+// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
+// If rsa is not large enough, [windows.WSAEFAULT] is returned.
+func GetSockName(s windows.Handle, rsa RawSockaddr) error {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ // although getsockname returns WSAEFAULT if the buffer is too small, it does not set
+	// &l to the correct size, so (apart from doubling the buffer repeatedly) there is no remedy.
+ return getsockname(s, ptr, &l)
+}
+
+// GetPeerName returns the remote address the socket is connected to.
+//
+// See [GetSockName] for more information.
+func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ return getpeername(s, ptr, &l)
+}
+
+func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ return bind(s, ptr, l)
+}
+
+// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the
+// their sockaddr interface, so they cannot be used with HvsockAddr
+// Replicate functionality here from
+// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
+
+// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
+// runtime via a WSAIoctl call:
+// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
+
+type runtimeFunc struct {
+ id guid.GUID
+ once sync.Once
+ addr uintptr
+ err error
+}
+
+func (f *runtimeFunc) Load() error {
+ f.once.Do(func() {
+ var s windows.Handle
+ s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
+ if f.err != nil {
+ return
+ }
+ defer windows.CloseHandle(s) //nolint:errcheck
+
+ var n uint32
+ f.err = windows.WSAIoctl(s,
+ windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
+ (*byte)(unsafe.Pointer(&f.id)),
+ uint32(unsafe.Sizeof(f.id)),
+ (*byte)(unsafe.Pointer(&f.addr)),
+ uint32(unsafe.Sizeof(f.addr)),
+ &n,
+ nil, //overlapped
+ 0, //completionRoutine
+ )
+ })
+ return f.err
+}
+
+var (
+ // todo: add `AcceptEx` and `GetAcceptExSockaddrs`
+ WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
+ Data1: 0x25a207b9,
+ Data2: 0xddf3,
+ Data3: 0x4660,
+ Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
+ }
+
+ connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
+)
+
+func ConnectEx(
+ fd windows.Handle,
+ rsa RawSockaddr,
+ sendBuf *byte,
+ sendDataLen uint32,
+ bytesSent *uint32,
+ overlapped *windows.Overlapped,
+) error {
+ if err := connectExFunc.Load(); err != nil {
+ return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
+ }
+ ptr, n, err := rsa.Sockaddr()
+ if err != nil {
+ return err
+ }
+ return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
+}
+
+// BOOL LpfnConnectex(
+// [in] SOCKET s,
+// [in] const sockaddr *name,
+// [in] int namelen,
+// [in, optional] PVOID lpSendBuffer,
+// [in] DWORD dwSendDataLength,
+// [out] LPDWORD lpdwBytesSent,
+// [in] LPOVERLAPPED lpOverlapped
+// )
+
+func connectEx(
+ s windows.Handle,
+ name unsafe.Pointer,
+ namelen int32,
+ sendBuf *byte,
+ sendDataLen uint32,
+ bytesSent *uint32,
+ overlapped *windows.Overlapped,
+) (err error) {
+ // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN
+ r1, _, e1 := syscall.Syscall9(connectExFunc.addr,
+ 7,
+ uintptr(s),
+ uintptr(name),
+ uintptr(namelen),
+ uintptr(unsafe.Pointer(sendBuf)),
+ uintptr(sendDataLen),
+ uintptr(unsafe.Pointer(bytesSent)),
+ uintptr(unsafe.Pointer(overlapped)),
+ 0,
+ 0)
+ if r1 == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return err
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
new file mode 100644
index 0000000000..6d2e1a9e44
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
@@ -0,0 +1,72 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package socket
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
+
+var (
+ modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
+
+ procbind = modws2_32.NewProc("bind")
+ procgetpeername = modws2_32.NewProc("getpeername")
+ procgetsockname = modws2_32.NewProc("getsockname")
+)
+
+func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
+ r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
+ r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
+ r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go
index 96700a73de..ca6e38fc00 100644
--- a/vendor/github.com/Microsoft/go-winio/pipe.go
+++ b/vendor/github.com/Microsoft/go-winio/pipe.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -13,6 +14,8 @@ import (
"syscall"
"time"
"unsafe"
+
+ "golang.org/x/sys/windows"
)
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
@@ -21,10 +24,10 @@ import (
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
-//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
-//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
-//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
-//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
+//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
+//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
+//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
+//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl
type ioStatusBlock struct {
Status, Information uintptr
@@ -51,45 +54,22 @@ type securityDescriptor struct {
Control uint16
Owner uintptr
Group uintptr
- Sacl uintptr
- Dacl uintptr
+ Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl
+ Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl
}
-type ntstatus int32
+type ntStatus int32
-func (status ntstatus) Err() error {
+func (status ntStatus) Err() error {
if status >= 0 {
return nil
}
return rtlNtStatusToDosError(status)
}
-const (
- cERROR_PIPE_BUSY = syscall.Errno(231)
- cERROR_NO_DATA = syscall.Errno(232)
- cERROR_PIPE_CONNECTED = syscall.Errno(535)
- cERROR_SEM_TIMEOUT = syscall.Errno(121)
-
- cSECURITY_SQOS_PRESENT = 0x100000
- cSECURITY_ANONYMOUS = 0
-
- cPIPE_TYPE_MESSAGE = 4
-
- cPIPE_READMODE_MESSAGE = 2
-
- cFILE_OPEN = 1
- cFILE_CREATE = 2
-
- cFILE_PIPE_MESSAGE_TYPE = 1
- cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
-
- cSE_DACL_PRESENT = 4
-)
-
var (
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
- // This error should match net.errClosing since docker takes a dependency on its text.
- ErrPipeListenerClosed = errors.New("use of closed network connection")
+ ErrPipeListenerClosed = net.ErrClosed
errPipeWriteClosed = errors.New("pipe has been closed for write")
)
@@ -116,9 +96,10 @@ func (f *win32Pipe) RemoteAddr() net.Addr {
}
func (f *win32Pipe) SetDeadline(t time.Time) error {
- f.SetReadDeadline(t)
- f.SetWriteDeadline(t)
- return nil
+ if err := f.SetReadDeadline(t); err != nil {
+ return err
+ }
+ return f.SetWriteDeadline(t)
}
// CloseWrite closes the write side of a message pipe in byte mode.
@@ -157,14 +138,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
return 0, io.EOF
}
n, err := f.win32File.Read(b)
- if err == io.EOF {
+ if err == io.EOF { //nolint:errorlint
// If this was the result of a zero-byte read, then
// it is possible that the read was due to a zero-size
// message. Since we are simulating CloseWrite with a
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
- } else if err == syscall.ERROR_MORE_DATA {
+ } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
// and the message still has more bytes. Treat this as a success, since
// this package presents all named pipes as byte streams.
@@ -173,7 +154,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
return n, err
}
-func (s pipeAddress) Network() string {
+func (pipeAddress) Network() string {
return "pipe"
}
@@ -184,16 +165,21 @@ func (s pipeAddress) String() string {
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
for {
-
select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
- h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+ h, err := createFile(*path,
+ access,
+ 0,
+ nil,
+ syscall.OPEN_EXISTING,
+ windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS,
+ 0)
if err == nil {
return h, nil
}
- if err != cERROR_PIPE_BUSY {
+ if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
return h, &os.PathError{Err: err, Op: "open", Path: *path}
}
// Wait 10 msec and try again. This is a rather simplistic
@@ -213,9 +199,10 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
} else {
absTimeout = time.Now().Add(2 * time.Second)
}
- ctx, _ := context.WithDeadline(context.Background(), absTimeout)
+ ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
+ defer cancel()
conn, err := DialPipeContext(ctx, path)
- if err == context.DeadlineExceeded {
+ if errors.Is(err, context.DeadlineExceeded) {
return nil, ErrTimeout
}
return conn, err
@@ -251,7 +238,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn,
// If the pipe is in message mode, return a message byte pipe, which
// supports CloseWrite().
- if flags&cPIPE_TYPE_MESSAGE != 0 {
+ if flags&windows.PIPE_TYPE_MESSAGE != 0 {
return &win32MessageBytePipe{
win32Pipe: win32Pipe{win32File: f, path: path},
}, nil
@@ -283,7 +270,11 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
oa.Length = unsafe.Sizeof(oa)
var ntPath unicodeString
- if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
+ if err := rtlDosPathNameToNtPathName(&path16[0],
+ &ntPath,
+ 0,
+ 0,
+ ).Err(); err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
defer localFree(ntPath.Buffer)
@@ -292,8 +283,8 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
// The security descriptor is only needed for the first pipe.
if first {
if sd != nil {
- len := uint32(len(sd))
- sdb := localAlloc(0, len)
+ l := uint32(len(sd))
+ sdb := localAlloc(0, l)
defer localFree(sdb)
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
@@ -301,28 +292,28 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
// Construct the default named pipe security descriptor.
var dacl uintptr
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
- return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
+ return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
}
defer localFree(dacl)
sdb := &securityDescriptor{
Revision: 1,
- Control: cSE_DACL_PRESENT,
+ Control: windows.SE_DACL_PRESENT,
Dacl: dacl,
}
oa.SecurityDescriptor = sdb
}
}
- typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
+ typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
if c.MessageMode {
- typ |= cFILE_PIPE_MESSAGE_TYPE
+ typ |= windows.FILE_PIPE_MESSAGE_TYPE
}
- disposition := uint32(cFILE_OPEN)
+ disposition := uint32(windows.FILE_OPEN)
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
if first {
- disposition = cFILE_CREATE
+ disposition = windows.FILE_CREATE
// By not asking for read or write access, the named pipe file system
// will put this pipe into an initially disconnected state, blocking
// client connections until the next call with first == false.
@@ -335,7 +326,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
h syscall.Handle
iosb ioStatusBlock
)
- err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
+ err = ntCreateNamedPipeFile(&h,
+ access,
+ &oa,
+ &iosb,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE,
+ disposition,
+ 0,
+ typ,
+ 0,
+ 0,
+ 0xffffffff,
+ uint32(c.InputBufferSize),
+ uint32(c.OutputBufferSize),
+ &timeout).Err()
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
@@ -380,7 +384,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
p.Close()
p = nil
err = <-ch
- if err == nil || err == ErrFileClosed {
+ if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
err = ErrPipeListenerClosed
}
}
@@ -402,12 +406,12 @@ func (l *win32PipeListener) listenerRoutine() {
p, err = l.makeConnectedServerPipe()
// If the connection was immediately closed by the client, try
// again.
- if err != cERROR_NO_DATA {
+ if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
break
}
}
responseCh <- acceptResponse{p, err}
- closed = err == ErrPipeListenerClosed
+ closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
}
}
syscall.Close(l.firstHandle)
@@ -469,15 +473,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
}
func connectPipe(p *win32File) error {
- c, err := p.prepareIo()
+ c, err := p.prepareIO()
if err != nil {
return err
}
defer p.wg.Done()
err = connectNamedPipe(p.handle, &c.o)
- _, err = p.asyncIo(c, nil, 0, err)
- if err != nil && err != cERROR_PIPE_CONNECTED {
+ _, err = p.asyncIO(c, nil, 0, err)
+ if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
return err
}
return nil
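A short sketch (outside the patch) of the server side of the pipe support above; the pipe name, mode, and buffer sizes are placeholders.

package main

import (
	"fmt"
	"io"

	"github.com/Microsoft/go-winio"
)

func main() {
	cfg := &winio.PipeConfig{
		MessageMode:      true, // message mode enables CloseWrite on the server side
		InputBufferSize:  64 * 1024,
		OutputBufferSize: 64 * 1024,
	}

	l, err := winio.ListenPipe(`\\.\pipe\example`, cfg)
	if err != nil {
		fmt.Println("listen:", err)
		return
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			fmt.Println("accept:", err)
			return
		}
		go func() {
			defer conn.Close()
			_, _ = io.Copy(io.Discard, conn) // drain the client
		}()
	}
}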
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
index f497c0e391..48ce4e9243 100644
--- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
@@ -1,5 +1,3 @@
-// +build windows
-
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
@@ -9,26 +7,26 @@ package guid
import (
"crypto/rand"
- "crypto/sha1"
+ "crypto/sha1" //nolint:gosec // not used for secure application
"encoding"
"encoding/binary"
"fmt"
"strconv"
-
- "golang.org/x/sys/windows"
)
+//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment
+
// Variant specifies which GUID variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8
-// The variants specified by RFC 4122.
+// The variants specified by RFC 4122 section 4.1.1.
const (
// VariantUnknown specifies a GUID variant which does not conform to one of
// the variant encodings specified in RFC 4122.
VariantUnknown Variant = iota
VariantNCS
- VariantRFC4122
+ VariantRFC4122 // RFC 4122
VariantMicrosoft
VariantFuture
)
@@ -38,16 +36,13 @@ const (
// hash of an input string.
type Version uint8
+func (v Version) String() string {
+ return strconv.FormatUint(uint64(v), 10)
+}
+
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
-// GUID represents a GUID/UUID. It has the same structure as
-// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
-// that type. It is defined as its own type so that stringification and
-// marshaling can be supported. The representation matches that used by native
-// Windows code.
-type GUID windows.GUID
-
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
var b [16]byte
@@ -70,7 +65,7 @@ func NewV4() (GUID, error) {
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
- b := sha1.New()
+ b := sha1.New() //nolint:gosec // not used for secure application
namespaceBytes := namespace.ToArray()
b.Write(namespaceBytes[:])
b.Write(name)
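A small sketch (not part of the vendored files) exercising the guid package described above; the namespace GUID and name are placeholders.

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Version 4: purely random GUID.
	g4, err := guid.NewV4()
	if err != nil {
		fmt.Println("v4:", err)
		return
	}

	// Version 5: SHA-1 hash of a namespace GUID plus a name.
	ns, err := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	g5, err := guid.NewV5(ns, []byte("example"))
	if err != nil {
		fmt.Println("v5:", err)
		return
	}

	fmt.Println(g4.String(), g4.Variant(), g4.Version())
	fmt.Println(g5.String(), g5.Variant(), g5.Version())
}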
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
new file mode 100644
index 0000000000..805bd35484
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type as that is only available to builds
+// targeted at `windows`. The representation matches that used by native Windows
+// code.
+type GUID struct {
+ Data1 uint32
+ Data2 uint16
+ Data3 uint16
+ Data4 [8]byte
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
new file mode 100644
index 0000000000..27e45ee5cc
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
@@ -0,0 +1,13 @@
+//go:build windows
+// +build windows
+
+package guid
+
+import "golang.org/x/sys/windows"
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type so that stringification and
+// marshaling can be supported. The representation matches that used by native
+// Windows code.
+type GUID windows.GUID
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
new file mode 100644
index 0000000000..4076d3132f
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.
+
+package guid
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[VariantUnknown-0]
+ _ = x[VariantNCS-1]
+ _ = x[VariantRFC4122-2]
+ _ = x[VariantMicrosoft-3]
+ _ = x[VariantFuture-4]
+}
+
+const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"
+
+var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}
+
+func (i Variant) String() string {
+ if i >= Variant(len(_Variant_index)-1) {
+ return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
+}
diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go
index 9c83d36fe5..0ff9dac906 100644
--- a/vendor/github.com/Microsoft/go-winio/privilege.go
+++ b/vendor/github.com/Microsoft/go-winio/privilege.go
@@ -1,3 +1,4 @@
+//go:build windows
// +build windows
package winio
@@ -24,19 +25,15 @@ import (
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
const (
- SE_PRIVILEGE_ENABLED = 2
+ //revive:disable-next-line:var-naming ALL_CAPS
+ SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED
- ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300
+ //revive:disable-next-line:var-naming ALL_CAPS
+ ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED
- SeBackupPrivilege = "SeBackupPrivilege"
- SeRestorePrivilege = "SeRestorePrivilege"
-)
-
-const (
- securityAnonymous = iota
- securityIdentification
- securityImpersonation
- securityDelegation
+ SeBackupPrivilege = "SeBackupPrivilege"
+ SeRestorePrivilege = "SeRestorePrivilege"
+ SeSecurityPrivilege = "SeSecurityPrivilege"
)
var (
@@ -50,11 +47,9 @@ type PrivilegeError struct {
}
func (e *PrivilegeError) Error() string {
- s := ""
+ s := "Could not enable privilege "
if len(e.privileges) > 1 {
s = "Could not enable privileges "
- } else {
- s = "Could not enable privilege "
}
for i, p := range e.privileges {
if i != 0 {
@@ -93,7 +88,7 @@ func RunWithPrivileges(names []string, fn func() error) error {
}
func mapPrivileges(names []string) ([]uint64, error) {
- var privileges []uint64
+ privileges := make([]uint64, 0, len(names))
privNameMutex.Lock()
defer privNameMutex.Unlock()
for _, name := range names {
@@ -126,7 +121,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
return err
}
- p, _ := windows.GetCurrentProcess()
+ p := windows.CurrentProcess()
var token windows.Token
err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
if err != nil {
@@ -139,10 +134,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error {
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
var b bytes.Buffer
- binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+ _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
- binary.Write(&b, binary.LittleEndian, p)
- binary.Write(&b, binary.LittleEndian, action)
+ _ = binary.Write(&b, binary.LittleEndian, p)
+ _ = binary.Write(&b, binary.LittleEndian, action)
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)
@@ -150,7 +145,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e
if !success {
return err
}
- if err == ERROR_NOT_ALL_ASSIGNED {
+ if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
return &PrivilegeError{privileges}
}
return nil
@@ -176,7 +171,7 @@ func getPrivilegeName(luid uint64) string {
}
func newThreadToken() (windows.Token, error) {
- err := impersonateSelf(securityImpersonation)
+ err := impersonateSelf(windows.SecurityImpersonation)
if err != nil {
return 0, err
}
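A brief sketch (outside the patch) of the privilege helpers above, enabling SeBackupPrivilege for the duration of a callback; the path is a placeholder and the code is Windows-only.

package main

import (
	"fmt"
	"syscall"

	"github.com/Microsoft/go-winio"
)

func main() {
	err := winio.RunWithPrivileges([]string{winio.SeBackupPrivilege}, func() error {
		// With SeBackupPrivilege enabled and backup semantics requested, the file
		// can be opened for reading regardless of its ACLs.
		f, err := winio.OpenForBackup(`C:\example.txt`,
			syscall.GENERIC_READ,
			syscall.FILE_SHARE_READ,
			syscall.OPEN_EXISTING)
		if err != nil {
			return err
		}
		return f.Close()
	})
	if err != nil {
		fmt.Println("backup read failed:", err)
	}
}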
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
index fc1ee4d3a3..67d1a104a6 100644
--- a/vendor/github.com/Microsoft/go-winio/reparse.go
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -1,3 +1,6 @@
+//go:build windows
+// +build windows
+
package winio
import (
@@ -113,16 +116,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte {
}
var b bytes.Buffer
- binary.Write(&b, binary.LittleEndian, &data)
+ _ = binary.Write(&b, binary.LittleEndian, &data)
if !rp.IsMountPoint {
flags := uint32(0)
if relative {
flags |= 1
}
- binary.Write(&b, binary.LittleEndian, flags)
+ _ = binary.Write(&b, binary.LittleEndian, flags)
}
- binary.Write(&b, binary.LittleEndian, ntTarget16)
- binary.Write(&b, binary.LittleEndian, target16)
+ _ = binary.Write(&b, binary.LittleEndian, ntTarget16)
+ _ = binary.Write(&b, binary.LittleEndian, target16)
return b.Bytes()
}
diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go
index db1b370a1b..5550ef6b61 100644
--- a/vendor/github.com/Microsoft/go-winio/sd.go
+++ b/vendor/github.com/Microsoft/go-winio/sd.go
@@ -1,23 +1,25 @@
+//go:build windows
// +build windows
package winio
import (
+ "errors"
"syscall"
"unsafe"
+
+ "golang.org/x/sys/windows"
)
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
+//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
//sys localFree(mem uintptr) = LocalFree
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
-const (
- cERROR_NONE_MAPPED = syscall.Errno(1332)
-)
-
type AccountLookupError struct {
Name string
Err error
@@ -28,8 +30,10 @@ func (e *AccountLookupError) Error() string {
return "lookup account: empty account name specified"
}
var s string
- switch e.Err {
- case cERROR_NONE_MAPPED:
+ switch {
+ case errors.Is(e.Err, windows.ERROR_INVALID_SID):
+ s = "the security ID structure is invalid"
+ case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
s = "not found"
default:
s = e.Err.Error()
@@ -37,6 +41,8 @@ func (e *AccountLookupError) Error() string {
return "lookup account " + e.Name + ": " + s
}
+func (e *AccountLookupError) Unwrap() error { return e.Err }
+
type SddlConversionError struct {
Sddl string
Err error
@@ -46,15 +52,19 @@ func (e *SddlConversionError) Error() string {
return "convert " + e.Sddl + ": " + e.Err.Error()
}
+func (e *SddlConversionError) Unwrap() error { return e.Err }
+
// LookupSidByName looks up the SID of an account by name
+//
+//revive:disable-next-line:var-naming SID, not Sid
func LookupSidByName(name string) (sid string, err error) {
if name == "" {
- return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
+ return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
}
var sidSize, sidNameUse, refDomainSize uint32
err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
- if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
+ if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
return "", &AccountLookupError{name, err}
}
sidBuffer := make([]byte, sidSize)
@@ -73,6 +83,42 @@ func LookupSidByName(name string) (sid string, err error) {
return sid, nil
}
+// LookupNameBySid looks up the name of an account by SID
+//
+//revive:disable-next-line:var-naming SID, not Sid
+func LookupNameBySid(sid string) (name string, err error) {
+ if sid == "" {
+ return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
+ }
+
+ sidBuffer, err := windows.UTF16PtrFromString(sid)
+ if err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+
+ var sidPtr *byte
+ if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+ defer localFree(uintptr(unsafe.Pointer(sidPtr)))
+
+ var nameSize, refDomainSize, sidNameUse uint32
+ err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
+ if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
+ return "", &AccountLookupError{sid, err}
+ }
+
+ nameBuffer := make([]uint16, nameSize)
+ refDomainBuffer := make([]uint16, refDomainSize)
+ err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+ if err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+
+ name = windows.UTF16ToString(nameBuffer)
+ return name, nil
+}
+
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
var sdBuffer uintptr
err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
@@ -87,7 +133,7 @@ func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
func SecurityDescriptorToSddl(sd []byte) (string, error) {
var sddl *uint16
- // The returned string length seems to including an aribtrary number of terminating NULs.
+ // The returned string length seems to include an arbitrary number of terminating NULs.
// Don't use it.
err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
if err != nil {
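A minimal, Windows-only sketch of the round trip enabled by the new LookupNameBySid (the well-known "Everyone" account is an illustrative input, not something the patch requires):

// Resolve a name to a SID and back using the existing LookupSidByName
// and the LookupNameBySid added above.
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	sid, err := winio.LookupSidByName("Everyone") // e.g. "S-1-1-0"
	if err != nil {
		panic(err)
	}
	name, err := winio.LookupNameBySid(sid) // resolve the SID back to a name
	if err != nil {
		panic(err)
	}
	fmt.Println(sid, name)
}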
diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go
index 5955c99fde..a6ca111b39 100644
--- a/vendor/github.com/Microsoft/go-winio/syscall.go
+++ b/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -1,3 +1,5 @@
+//go:build windows
+
package winio
-//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go
new file mode 100644
index 0000000000..2aa045843e
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/tools.go
@@ -0,0 +1,5 @@
+//go:build tools
+
+package winio
+
+import _ "golang.org/x/tools/cmd/stringer"
diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
index 176ff75e32..83f45a1351 100644
--- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -1,4 +1,6 @@
-// Code generated by 'go generate'; DO NOT EDIT.
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
package winio
@@ -47,9 +49,11 @@ var (
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
+ procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
+ procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
@@ -74,7 +78,6 @@ var (
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
- procbind = modws2_32.NewProc("bind")
)
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
@@ -123,6 +126,14 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
return
}
+func convertStringSidToSid(str *uint16, sid **byte) (err error) {
+ r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
len = uint32(r0)
@@ -154,6 +165,14 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS
return
}
+func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(systemName)
@@ -380,25 +399,25 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
return
}
-func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
+func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
- status = ntstatus(r0)
+ status = ntStatus(r0)
return
}
-func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
+func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
- status = ntstatus(r0)
+ status = ntStatus(r0)
return
}
-func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
+func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
- status = ntstatus(r0)
+ status = ntStatus(r0)
return
}
-func rtlNtStatusToDosError(status ntstatus) (winerr error) {
+func rtlNtStatusToDosError(status ntStatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
@@ -417,11 +436,3 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint
}
return
}
-
-func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
- if r1 == socketError {
- err = errnoErr(e1)
- }
- return
-}
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
index 76cf4852c7..eb9fb7ff2d 100644
--- a/vendor/github.com/coreos/go-semver/semver/semver.go
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -85,7 +85,7 @@ func (v *Version) Set(version string) error {
return fmt.Errorf("failed to validate metadata: %v", err)
}
- parsed := make([]int64, 3, 3)
+ parsed := make([]int64, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
index 439ad28746..c5b23a8196 100644
--- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
@@ -69,6 +69,58 @@ func Enabled() bool {
return true
}
+// StderrIsJournalStream returns whether the process stderr is connected
+// to the Journal's stream transport.
+//
+// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
+//
+// Returns true if JOURNAL_STREAM environment variable is present,
+// and stderr's device and inode numbers match it.
+//
+// An error is returned if an unexpected error occurs: e.g. the JOURNAL_STREAM
+// environment variable is present but malformed, the fstat syscall fails, etc.
+//
+// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
+func StderrIsJournalStream() (bool, error) {
+ return fdIsJournalStream(syscall.Stderr)
+}
+
+// StdoutIsJournalStream returns whether the process stdout is connected
+// to the Journal's stream transport.
+//
+// Returns true if JOURNAL_STREAM environment variable is present,
+// and stdout's device and inode numbers match it.
+//
+// An error is returned if an unexpected error occurs: e.g. the JOURNAL_STREAM
+// environment variable is present but malformed, the fstat syscall fails, etc.
+//
+// Most users should probably use [StderrIsJournalStream].
+func StdoutIsJournalStream() (bool, error) {
+ return fdIsJournalStream(syscall.Stdout)
+}
+
+func fdIsJournalStream(fd int) (bool, error) {
+ journalStream := os.Getenv("JOURNAL_STREAM")
+ if journalStream == "" {
+ return false, nil
+ }
+
+ var expectedStat syscall.Stat_t
+ _, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
+ if err != nil {
+ return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
+ }
+
+ var stat syscall.Stat_t
+ err = syscall.Fstat(fd, &stat)
+ if err != nil {
+ return false, err
+ }
+
+ match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
+ return match, nil
+}
+
// Send a message to the local systemd journal. vars is a map of journald
// fields to values. Fields must be composed of uppercase letters, numbers,
// and underscores, but must not start with an underscore. Within these
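A minimal sketch of the automatic protocol upgrade the new StderrIsJournalStream enables; the plain-log fallback path is an illustrative assumption:

// If stderr is wired to journald's stream transport, switch to the native
// protocol so structured fields can be attached; otherwise log normally.
package main

import (
	"log"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	ok, err := journal.StderrIsJournalStream()
	if err != nil {
		log.Fatal(err)
	}
	if ok {
		_ = journal.Send("starting up", journal.PriInfo, map[string]string{"UNIT_PHASE": "init"})
	} else {
		log.Println("starting up")
	}
}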
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
index 677aca68ed..322e41e74c 100644
--- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
@@ -33,3 +33,11 @@ func Enabled() bool {
func Send(message string, priority Priority, vars map[string]string) error {
return errors.New("could not initialize socket to journald")
}
+
+func StderrIsJournalStream() (bool, error) {
+ return false, nil
+}
+
+func StdoutIsJournalStream() (bool, error) {
+ return false, nil
+}
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
index 8c0c23b2fe..b7cd00b0d6 100644
--- a/vendor/github.com/docker/distribution/reference/reference.go
+++ b/vendor/github.com/docker/distribution/reference/reference.go
@@ -3,13 +3,13 @@
//
// Grammar
//
-// reference := name [ ":" tag ] [ "@" digest ]
+// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
-// alpha-numeric := /[a-z0-9]+/
+// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go
index f3b389421e..d62f6474d9 100644
--- a/vendor/go.etcd.io/etcd/api/v3/version/version.go
+++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go
@@ -26,7 +26,7 @@ import (
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "3.0.0"
- Version = "3.5.7"
+ Version = "3.5.9"
APIVersion = "unknown"
// Git SHA Value will be set during build
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
index d7fd0d90db..34f35b9f28 100644
--- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
@@ -16,6 +16,7 @@ package logutil
import (
"sort"
+ "time"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
@@ -46,15 +47,20 @@ var DefaultZapLoggerConfig = zap.Config{
// copied from "zap.NewProductionEncoderConfig" with some updates
EncoderConfig: zapcore.EncoderConfig{
- TimeKey: "ts",
- LevelKey: "level",
- NameKey: "logger",
- CallerKey: "caller",
- MessageKey: "msg",
- StacktraceKey: "stacktrace",
- LineEnding: zapcore.DefaultLineEnding,
- EncodeLevel: zapcore.LowercaseLevelEncoder,
- EncodeTime: zapcore.ISO8601TimeEncoder,
+ TimeKey: "ts",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+
+ // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps
+ EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
+ enc.AppendString(t.Format("2006-01-02T15:04:05.999999Z0700"))
+ },
+
EncodeDuration: zapcore.StringDurationEncoder,
EncodeCaller: zapcore.ShortCallerEncoder,
},
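A tiny sketch of the timestamp layout the custom EncodeTime above produces; the sample instant is arbitrary:

// Demonstrates the capnslog-compatible layout: microsecond precision,
// trailing zeros trimmed, "Z" for UTC.
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2023, 10, 13, 11, 34, 11, 123456000, time.UTC)
	fmt.Println(t.Format("2006-01-02T15:04:05.999999Z0700"))
	// Output: 2023-10-13T11:34:11.123456Z
}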
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
new file mode 100644
index 0000000000..ffcecd8c67
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "fmt"
+)
+
+type TLSVersion string
+
+// Constants for TLS versions.
+const (
+ TLSVersionDefault TLSVersion = ""
+ TLSVersion12 TLSVersion = "TLS1.2"
+ TLSVersion13 TLSVersion = "TLS1.3"
+)
+
+// GetTLSVersion returns the corresponding tls.Version or error.
+func GetTLSVersion(version string) (uint16, error) {
+ var v uint16
+
+ switch version {
+ case string(TLSVersionDefault):
+ v = 0 // 0 means let Go decide.
+ case string(TLSVersion12):
+ v = tls.VersionTLS12
+ case string(TLSVersion13):
+ v = tls.VersionTLS13
+ default:
+ return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version)
+ }
+
+ return v, nil
+}
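A minimal sketch of the new GetTLSVersion helper mapping the accepted strings onto crypto/tls constants:

package main

import (
	"crypto/tls"
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
)

func main() {
	min, err := tlsutil.GetTLSVersion("TLS1.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(min == tls.VersionTLS12) // true

	def, _ := tlsutil.GetTLSVersion("") // empty string: let Go decide
	fmt.Println(def)                    // 0
}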
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
index c3bc56a65b..150545d08d 100644
--- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
@@ -165,6 +165,14 @@ type TLSInfo struct {
// Note that cipher suites are prioritized in the given order.
CipherSuites []uint16
+ // MinVersion is the minimum TLS version that is acceptable.
+ // If not set, the minimum version is TLS 1.2.
+ MinVersion uint16
+
+ // MaxVersion is the maximum TLS version that is acceptable.
+ // If not set, the default used by Go is selected (see tls.Config.MaxVersion).
+ MaxVersion uint16
+
selfCert bool
// parseFunc exists to simplify testing. Typically, parseFunc
@@ -339,8 +347,8 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali
// Previously,
// 1. Server has non-empty (*tls.Config).Certificates on client hello
// 2. Server calls (*tls.Config).GetCertificate iff:
-// - Server's (*tls.Config).Certificates is not empty, or
-// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
+// - Server's (*tls.Config).Certificates is not empty, or
+// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
//
// When (*tls.Config).Certificates is always populated on initial handshake,
// client is expected to provide a valid matching SNI to pass the TLS
@@ -378,8 +386,17 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) {
}
}
+ var minVersion uint16
+ if info.MinVersion != 0 {
+ minVersion = info.MinVersion
+ } else {
+ // Default minimum version is TLS 1.2, previous versions are insecure and deprecated.
+ minVersion = tls.VersionTLS12
+ }
+
cfg := &tls.Config{
- MinVersion: tls.VersionTLS12,
+ MinVersion: minVersion,
+ MaxVersion: info.MaxVersion,
ServerName: info.ServerName,
}
@@ -510,11 +527,6 @@ func (info TLSInfo) ServerConfig() (*tls.Config, error) {
// "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
cfg.NextProtos = []string{"h2"}
- // go1.13 enables TLS 1.3 by default
- // and in TLS 1.3, cipher suites are not configurable
- // setting Max TLS version to TLS 1.2 for go 1.13
- cfg.MaxVersion = tls.VersionTLS12
-
return cfg, nil
}
@@ -569,11 +581,6 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) {
}
}
- // go1.13 enables TLS 1.3 by default
- // and in TLS 1.3, cipher suites are not configurable
- // setting Max TLS version to TLS 1.2 for go 1.13
- cfg.MaxVersion = tls.VersionTLS12
-
return cfg, nil
}
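A hedged sketch of a client TLSInfo that uses the new MinVersion knob; the certificate paths are placeholders, and CertFile/KeyFile/TrustedCAFile are pre-existing TLSInfo fields:

package main

import (
	"crypto/tls"
	"log"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	info := transport.TLSInfo{
		CertFile:      "/etc/etcd/client.crt",
		KeyFile:       "/etc/etcd/client.key",
		TrustedCAFile: "/etc/etcd/ca.crt",
		MinVersion:    tls.VersionTLS13,
		// MaxVersion left at zero: Go's default applies now that the
		// hard TLS 1.2 cap removed above is gone.
	}
	cfg, err := info.ClientConfig()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("min=%#x max=%#x", cfg.MinVersion, cfg.MaxVersion)
}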
diff --git a/vendor/go.etcd.io/etcd/client/v3/doc.go b/vendor/go.etcd.io/etcd/client/v3/doc.go
index 645d744a5a..fd61aff117 100644
--- a/vendor/go.etcd.io/etcd/client/v3/doc.go
+++ b/vendor/go.etcd.io/etcd/client/v3/doc.go
@@ -61,7 +61,8 @@
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC error: e.g. when clock drifts in server-side before client's context deadline exceeded.
-// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
+//
+// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
//
// Here is the example code to handle client errors:
//
@@ -102,5 +103,4 @@
// The grpc load balancer is registered statically and is shared across etcd clients.
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
// variable. E.g. "ETCD_CLIENT_DEBUG=1".
-//
package clientv3
diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
index 1d3f1a7a2c..f6674235cd 100644
--- a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
+++ b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
@@ -45,8 +45,8 @@ func extractHostFromPath(pathStr string) string {
return extractHostFromHostPort(path.Base(pathStr))
}
-//mustSplit2 returns the values from strings.SplitN(s, sep, 2).
-//If sep is not found, it returns ("", "", false) instead.
+// mustSplit2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it returns ("", "", false) instead.
func mustSplit2(s, sep string) (string, string) {
spl := strings.SplitN(s, sep, 2)
if len(spl) < 2 {
@@ -81,11 +81,12 @@ func schemeToCredsRequirement(schema string) CredsRequirement {
// The main differences:
// - etcd supports unixs & https names as opposed to unix & http to
// distinguish need to configure certificates.
-// - etcd support http(s) names as opposed to tcp supported by grpc/dial method.
-// - etcd supports unix(s)://local-file naming schema
+// - etcd support http(s) names as opposed to tcp supported by grpc/dial method.
+// - etcd supports unix(s)://local-file naming schema
// (as opposed to unix:local-file canonical name used by grpc for current dir files).
-// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
-// is considered serverName - to allow local testing of cert-protected communication.
+// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
+// is considered serverName - to allow local testing of cert-protected communication.
+//
// See more:
// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47
// - https://golang.org/pkg/net/#Dial
diff --git a/vendor/go.etcd.io/etcd/client/v3/txn.go b/vendor/go.etcd.io/etcd/client/v3/txn.go
index 22301fba6b..3f6a953cf0 100644
--- a/vendor/go.etcd.io/etcd/client/v3/txn.go
+++ b/vendor/go.etcd.io/etcd/client/v3/txn.go
@@ -25,15 +25,14 @@ import (
// Txn is the interface that wraps mini-transactions.
//
-// Txn(context.TODO()).If(
-// Compare(Value(k1), ">", v1),
-// Compare(Version(k1), "=", 2)
-// ).Then(
-// OpPut(k2,v2), OpPut(k3,v3)
-// ).Else(
-// OpPut(k4,v4), OpPut(k5,v5)
-// ).Commit()
-//
+// Txn(context.TODO()).If(
+// Compare(Value(k1), ">", v1),
+// Compare(Version(k1), "=", 2)
+// ).Then(
+// OpPut(k2,v2), OpPut(k3,v3)
+// ).Else(
+// OpPut(k4,v4), OpPut(k5,v5)
+// ).Commit()
type Txn interface {
// If takes a list of comparison. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations
diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go
index bc886936c8..41a6ec9763 100644
--- a/vendor/go.etcd.io/etcd/client/v3/watch.go
+++ b/vendor/go.etcd.io/etcd/client/v3/watch.go
@@ -848,7 +848,7 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{
}
} else {
// current progress of watch; <= store revision
- nextRev = wr.Header.Revision
+ nextRev = wr.Header.Revision + 1
}
if len(wr.Events) > 0 {
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
index c3fa253893..2e337a0ed5 100644
--- a/vendor/go.uber.org/atomic/.gitignore
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -10,3 +10,6 @@ lint.log
# Profiling output
*.prof
+
+# Output of fossa analyzer
+/fossa
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
deleted file mode 100644
index 13d0a4f254..0000000000
--- a/vendor/go.uber.org/atomic/.travis.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/atomic
-
-env:
- global:
- - GO111MODULE=on
-
-matrix:
- include:
- - go: oldstable
- - go: stable
- env: LINT=1
-
-cache:
- directories:
- - vendor
-
-before_install:
- - go version
-
-script:
- - test -z "$LINT" || make lint
- - make cover
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
index 24c0274dc3..5fe03f21bd 100644
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -4,6 +4,37 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [1.10.0] - 2022-08-11
+### Added
+- Add `atomic.Float32` type for atomic operations on `float32`.
+- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
+ and `atomic.Value`.
+- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
+  type. This is present only for Go 1.18 or higher, and is a drop-in
+  replacement for the standard library's `sync/atomic.Pointer` type.
+
+### Changed
+- Deprecate `CAS` methods on all types in favor of corresponding
+ `CompareAndSwap` methods.
+
+Thanks to @eNV25 and @icpd for their contributions to this release.
+
+[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
+
+## [1.9.0] - 2021-07-15
+### Added
+- Add `Float64.Swap` to match int atomic operations.
+- Add `atomic.Time` type for atomic operations on `time.Time` values.
+
+[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
+
+## [1.8.0] - 2021-06-09
+### Added
+- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
+- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
+
+[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
+
## [1.7.0] - 2020-09-14
### Added
- Support JSON serialization and deserialization of primitive atomic types.
@@ -15,32 +46,46 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Removed
- Remove dependency on `golang.org/x/{lint, tools}`.
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+
## [1.6.0] - 2020-02-24
### Changed
- Drop library dependency on `golang.org/x/{lint, tools}`.
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+
## [1.5.1] - 2019-11-19
- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
causing `CAS` to fail even though the old value matches.
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+
## [1.5.0] - 2019-10-29
### Changed
- With Go modules, only the `go.uber.org/atomic` import path is supported now.
If you need to use the old import path, please add a `replace` directive to
your `go.mod`.
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+
## [1.4.0] - 2019-05-01
### Added
- Add `atomic.Error` type for atomic operations on `error` values.
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+
## [1.3.2] - 2018-05-02
### Added
- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+
## [1.3.1] - 2017-11-14
### Fixed
- Revert optimization for `atomic.String.Store("")` which caused data races.
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+
## [1.3.0] - 2017-11-13
### Added
- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
@@ -48,10 +93,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- Optimize `atomic.String.Store("")` by avoiding an allocation.
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+
## [1.2.0] - 2017-04-12
### Added
- Shadow `atomic.Value` from `sync/atomic`.
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+
## [1.1.0] - 2017-03-10
### Added
- Add atomic `Float64` type.
@@ -59,18 +108,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
- Support new `go.uber.org/atomic` import path.
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+
## [1.0.0] - 2016-07-18
- Initial release.
-[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
-[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
-[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
-[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
-[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
-[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
-[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
-[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
-[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
index 1b1376d425..46c945b32b 100644
--- a/vendor/go.uber.org/atomic/Makefile
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -69,6 +69,7 @@ generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
generatenodirty:
@[ -z "$$(git status --porcelain)" ] || ( \
echo "Working tree is dirty. Commit your changes first."; \
+ git status; \
exit 1 )
@make generate
@status=$$(git status --porcelain); \
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
index ade0c20f16..96b47a1f12 100644
--- a/vendor/go.uber.org/atomic/README.md
+++ b/vendor/go.uber.org/atomic/README.md
@@ -55,8 +55,8 @@ Released under the [MIT License](LICENSE.txt).
[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
[doc]: https://godoc.org/go.uber.org/atomic
-[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/atomic
+[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/atomic
[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
index 9cf1914b1f..dfa2085f49 100644
--- a/vendor/go.uber.org/atomic/bool.go
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,10 +36,10 @@ type Bool struct {
var _zeroBool bool
// NewBool creates a new Bool.
-func NewBool(v bool) *Bool {
+func NewBool(val bool) *Bool {
x := &Bool{}
- if v != _zeroBool {
- x.Store(v)
+ if val != _zeroBool {
+ x.Store(val)
}
return x
}
@@ -50,19 +50,26 @@ func (x *Bool) Load() bool {
}
// Store atomically stores the passed bool.
-func (x *Bool) Store(v bool) {
- x.v.Store(boolToInt(v))
+func (x *Bool) Store(val bool) {
+ x.v.Store(boolToInt(val))
}
// CAS is an atomic compare-and-swap for bool values.
-func (x *Bool) CAS(o, n bool) bool {
- return x.v.CAS(boolToInt(o), boolToInt(n))
+//
+// Deprecated: Use CompareAndSwap.
+func (x *Bool) CAS(old, new bool) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for bool values.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
+ return x.v.CompareAndSwap(boolToInt(old), boolToInt(new))
}
// Swap atomically stores the given bool and returns the old
// value.
-func (x *Bool) Swap(o bool) bool {
- return truthy(x.v.Swap(boolToInt(o)))
+func (x *Bool) Swap(val bool) (old bool) {
+ return truthy(x.v.Swap(boolToInt(val)))
}
// MarshalJSON encodes the wrapped bool into JSON.
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
index c7bf7a827a..a2e60e9873 100644
--- a/vendor/go.uber.org/atomic/bool_ext.go
+++ b/vendor/go.uber.org/atomic/bool_ext.go
@@ -38,7 +38,7 @@ func boolToInt(b bool) uint32 {
}
// Toggle atomically negates the Boolean and returns the previous value.
-func (b *Bool) Toggle() bool {
+func (b *Bool) Toggle() (old bool) {
for {
old := b.Load()
if b.CAS(old, !old) {
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
index 027cfcb20b..6f4157445c 100644
--- a/vendor/go.uber.org/atomic/duration.go
+++ b/vendor/go.uber.org/atomic/duration.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -37,10 +37,10 @@ type Duration struct {
var _zeroDuration time.Duration
// NewDuration creates a new Duration.
-func NewDuration(v time.Duration) *Duration {
+func NewDuration(val time.Duration) *Duration {
x := &Duration{}
- if v != _zeroDuration {
- x.Store(v)
+ if val != _zeroDuration {
+ x.Store(val)
}
return x
}
@@ -51,19 +51,26 @@ func (x *Duration) Load() time.Duration {
}
// Store atomically stores the passed time.Duration.
-func (x *Duration) Store(v time.Duration) {
- x.v.Store(int64(v))
+func (x *Duration) Store(val time.Duration) {
+ x.v.Store(int64(val))
}
// CAS is an atomic compare-and-swap for time.Duration values.
-func (x *Duration) CAS(o, n time.Duration) bool {
- return x.v.CAS(int64(o), int64(n))
+//
+// Deprecated: Use CompareAndSwap.
+func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for time.Duration values.
+func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) {
+ return x.v.CompareAndSwap(int64(old), int64(new))
}
// Swap atomically stores the given time.Duration and returns the old
// value.
-func (x *Duration) Swap(o time.Duration) time.Duration {
- return time.Duration(x.v.Swap(int64(o)))
+func (x *Duration) Swap(val time.Duration) (old time.Duration) {
+ return time.Duration(x.v.Swap(int64(val)))
}
// MarshalJSON encodes the wrapped time.Duration into JSON.
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
index 6273b66bd6..4c18b0a9ed 100644
--- a/vendor/go.uber.org/atomic/duration_ext.go
+++ b/vendor/go.uber.org/atomic/duration_ext.go
@@ -25,13 +25,13 @@ import "time"
//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
// Add atomically adds to the wrapped time.Duration and returns the new value.
-func (d *Duration) Add(n time.Duration) time.Duration {
- return time.Duration(d.v.Add(int64(n)))
+func (d *Duration) Add(delta time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(delta)))
}
// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
-func (d *Duration) Sub(n time.Duration) time.Duration {
- return time.Duration(d.v.Sub(int64(n)))
+func (d *Duration) Sub(delta time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(delta)))
}
// String encodes the wrapped value as a string.
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
index a6166fbea0..27b23ea162 100644
--- a/vendor/go.uber.org/atomic/error.go
+++ b/vendor/go.uber.org/atomic/error.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -32,10 +32,10 @@ type Error struct {
var _zeroError error
// NewError creates a new Error.
-func NewError(v error) *Error {
+func NewError(val error) *Error {
x := &Error{}
- if v != _zeroError {
- x.Store(v)
+ if val != _zeroError {
+ x.Store(val)
}
return x
}
@@ -46,6 +46,17 @@ func (x *Error) Load() error {
}
// Store atomically stores the passed error.
-func (x *Error) Store(v error) {
- x.v.Store(packError(v))
+func (x *Error) Store(val error) {
+ x.v.Store(packError(val))
+}
+
+// CompareAndSwap is an atomic compare-and-swap for error values.
+func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
+ return x.v.CompareAndSwap(packError(old), packError(new))
+}
+
+// Swap atomically stores the given error and returns the old
+// value.
+func (x *Error) Swap(val error) (old error) {
+ return unpackError(x.v.Swap(packError(val)))
}
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
index ffe0be21cb..d31fb633bb 100644
--- a/vendor/go.uber.org/atomic/error_ext.go
+++ b/vendor/go.uber.org/atomic/error_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,7 @@ package atomic
// atomic.Value panics on nil inputs, or if the underlying type changes.
// Stabilize by always storing a custom struct that we control.
-//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
+//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go
type packedError struct{ Value error }
diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go
new file mode 100644
index 0000000000..5d535a6d2a
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float32.go
@@ -0,0 +1,77 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
+// Float32 is an atomic type-safe wrapper for float32 values.
+type Float32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroFloat32 float32
+
+// NewFloat32 creates a new Float32.
+func NewFloat32(val float32) *Float32 {
+ x := &Float32{}
+ if val != _zeroFloat32 {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped float32.
+func (x *Float32) Load() float32 {
+ return math.Float32frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float32.
+func (x *Float32) Store(val float32) {
+ x.v.Store(math.Float32bits(val))
+}
+
+// Swap atomically stores the given float32 and returns the old
+// value.
+func (x *Float32) Swap(val float32) (old float32) {
+ return math.Float32frombits(x.v.Swap(math.Float32bits(val)))
+}
+
+// MarshalJSON encodes the wrapped float32 into JSON.
+func (x *Float32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float32 from JSON.
+func (x *Float32) UnmarshalJSON(b []byte) error {
+ var v float32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go
new file mode 100644
index 0000000000..b0cd8d9c82
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float32_ext.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "math"
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
+
+// Add atomically adds to the wrapped float32 and returns the new value.
+func (f *Float32) Add(delta float32) float32 {
+ for {
+ old := f.Load()
+ new := old + delta
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float32 and returns the new value.
+func (f *Float32) Sub(delta float32) float32 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float32 values.
+//
+// Deprecated: Use CompareAndSwap
+func (f *Float32) CAS(old, new float32) (swapped bool) {
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float32 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
+// This prevents typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the loop above would block forever.
+func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float32) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
+}
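A minimal usage sketch of the new atomic.Float32 wrapper:

package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	f := atomic.NewFloat32(1.5)
	f.Add(2.5)                        // now 4 (1.5 + 2.5 is exactly representable)
	swapped := f.CompareAndSwap(4, 8) // true
	fmt.Println(f.Load(), swapped)    // 8 true
}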
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
index 0719060207..11d5189a5f 100644
--- a/vendor/go.uber.org/atomic/float64.go
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -37,10 +37,10 @@ type Float64 struct {
var _zeroFloat64 float64
// NewFloat64 creates a new Float64.
-func NewFloat64(v float64) *Float64 {
+func NewFloat64(val float64) *Float64 {
x := &Float64{}
- if v != _zeroFloat64 {
- x.Store(v)
+ if val != _zeroFloat64 {
+ x.Store(val)
}
return x
}
@@ -51,13 +51,14 @@ func (x *Float64) Load() float64 {
}
// Store atomically stores the passed float64.
-func (x *Float64) Store(v float64) {
- x.v.Store(math.Float64bits(v))
+func (x *Float64) Store(val float64) {
+ x.v.Store(math.Float64bits(val))
}
-// CAS is an atomic compare-and-swap for float64 values.
-func (x *Float64) CAS(o, n float64) bool {
- return x.v.CAS(math.Float64bits(o), math.Float64bits(n))
+// Swap atomically stores the given float64 and returns the old
+// value.
+func (x *Float64) Swap(val float64) (old float64) {
+ return math.Float64frombits(x.v.Swap(math.Float64bits(val)))
}
// MarshalJSON encodes the wrapped float64 into JSON.
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
index 927b1add74..48c52b0abf 100644
--- a/vendor/go.uber.org/atomic/float64_ext.go
+++ b/vendor/go.uber.org/atomic/float64_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,15 +20,18 @@
package atomic
-import "strconv"
+import (
+ "math"
+ "strconv"
+)
-//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go
+//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go
// Add atomically adds to the wrapped float64 and returns the new value.
-func (f *Float64) Add(s float64) float64 {
+func (f *Float64) Add(delta float64) float64 {
for {
old := f.Load()
- new := old + s
+ new := old + delta
if f.CAS(old, new) {
return new
}
@@ -36,8 +39,34 @@ func (f *Float64) Add(s float64) float64 {
}
// Sub atomically subtracts from the wrapped float64 and returns the new value.
-func (f *Float64) Sub(s float64) float64 {
- return f.Add(-s)
+func (f *Float64) Sub(delta float64) float64 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float64 values.
+//
+// Deprecated: Use CompareAndSwap
+func (f *Float64) CAS(old, new float64) (swapped bool) {
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float64 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
+// This prevents typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the loop above would block forever.
+func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
}
// String encodes the wrapped value as a string.
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
index 50d6b24858..1e9ef4f879 100644
--- a/vendor/go.uber.org/atomic/gen.go
+++ b/vendor/go.uber.org/atomic/gen.go
@@ -24,3 +24,4 @@ package atomic
//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
+//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
index 18ae56493e..b9a68f42ca 100644
--- a/vendor/go.uber.org/atomic/int32.go
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Int32 struct {
}
// NewInt32 creates a new Int32.
-func NewInt32(i int32) *Int32 {
- return &Int32{v: i}
+func NewInt32(val int32) *Int32 {
+ return &Int32{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Int32) Load() int32 {
}
// Add atomically adds to the wrapped int32 and returns the new value.
-func (i *Int32) Add(n int32) int32 {
- return atomic.AddInt32(&i.v, n)
+func (i *Int32) Add(delta int32) int32 {
+ return atomic.AddInt32(&i.v, delta)
}
// Sub atomically subtracts from the wrapped int32 and returns the new value.
-func (i *Int32) Sub(n int32) int32 {
- return atomic.AddInt32(&i.v, -n)
+func (i *Int32) Sub(delta int32) int32 {
+ return atomic.AddInt32(&i.v, -delta)
}
// Inc atomically increments the wrapped int32 and returns the new value.
@@ -66,18 +66,25 @@ func (i *Int32) Dec() int32 {
}
// CAS is an atomic compare-and-swap.
-func (i *Int32) CAS(old, new int32) bool {
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Int32) CAS(old, new int32) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
return atomic.CompareAndSwapInt32(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Int32) Store(n int32) {
- atomic.StoreInt32(&i.v, n)
+func (i *Int32) Store(val int32) {
+ atomic.StoreInt32(&i.v, val)
}
// Swap atomically swaps the wrapped int32 and returns the old value.
-func (i *Int32) Swap(n int32) int32 {
- return atomic.SwapInt32(&i.v, n)
+func (i *Int32) Swap(val int32) (old int32) {
+ return atomic.SwapInt32(&i.v, val)
}
// MarshalJSON encodes the wrapped int32 into JSON.
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
index 2bcbbfaa95..78d260976f 100644
--- a/vendor/go.uber.org/atomic/int64.go
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Int64 struct {
}
// NewInt64 creates a new Int64.
-func NewInt64(i int64) *Int64 {
- return &Int64{v: i}
+func NewInt64(val int64) *Int64 {
+ return &Int64{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Int64) Load() int64 {
}
// Add atomically adds to the wrapped int64 and returns the new value.
-func (i *Int64) Add(n int64) int64 {
- return atomic.AddInt64(&i.v, n)
+func (i *Int64) Add(delta int64) int64 {
+ return atomic.AddInt64(&i.v, delta)
}
// Sub atomically subtracts from the wrapped int64 and returns the new value.
-func (i *Int64) Sub(n int64) int64 {
- return atomic.AddInt64(&i.v, -n)
+func (i *Int64) Sub(delta int64) int64 {
+ return atomic.AddInt64(&i.v, -delta)
}
// Inc atomically increments the wrapped int64 and returns the new value.
@@ -66,18 +66,25 @@ func (i *Int64) Dec() int64 {
}
// CAS is an atomic compare-and-swap.
-func (i *Int64) CAS(old, new int64) bool {
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Int64) CAS(old, new int64) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) {
return atomic.CompareAndSwapInt64(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Int64) Store(n int64) {
- atomic.StoreInt64(&i.v, n)
+func (i *Int64) Store(val int64) {
+ atomic.StoreInt64(&i.v, val)
}
// Swap atomically swaps the wrapped int64 and returns the old value.
-func (i *Int64) Swap(n int64) int64 {
- return atomic.SwapInt64(&i.v, n)
+func (i *Int64) Swap(val int64) (old int64) {
+ return atomic.SwapInt64(&i.v, val)
}
// MarshalJSON encodes the wrapped int64 into JSON.
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
index a8201cb4a1..54b74174ab 100644
--- a/vendor/go.uber.org/atomic/nocmp.go
+++ b/vendor/go.uber.org/atomic/nocmp.go
@@ -23,13 +23,13 @@ package atomic
// nocmp is an uncomparable struct. Embed this inside another struct to make
// it uncomparable.
//
-// type Foo struct {
-// nocmp
-// // ...
-// }
+// type Foo struct {
+// nocmp
+// // ...
+// }
//
// This DOES NOT:
//
-// - Disallow shallow copies of structs
-// - Disallow comparison of pointers to uncomparable structs
+// - Disallow shallow copies of structs
+// - Disallow comparison of pointers to uncomparable structs
type nocmp [0]func()
diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go
new file mode 100644
index 0000000000..e0f47dba46
--- /dev/null
+++ b/vendor/go.uber.org/atomic/pointer_go118.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18 && !go1.19
+// +build go1.18,!go1.19
+
+package atomic
+
+import "unsafe"
+
+type Pointer[T any] struct {
+ _ nocmp // disallow non-atomic comparison
+ p UnsafePointer
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+ var p Pointer[T]
+ if v != nil {
+ p.p.Store(unsafe.Pointer(v))
+ }
+ return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+ return (*T)(p.p.Load())
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+ p.p.Store(unsafe.Pointer(val))
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+ return (*T)(p.p.Swap(unsafe.Pointer(val)))
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
+}
diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go
new file mode 100644
index 0000000000..6726f17ad6
--- /dev/null
+++ b/vendor/go.uber.org/atomic/pointer_go119.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.19
+// +build go1.19
+
+package atomic
+
+import "sync/atomic"
+
+// Pointer is an atomic pointer of type *T.
+type Pointer[T any] struct {
+ _ nocmp // disallow non-atomic comparison
+ p atomic.Pointer[T]
+}
+
+// NewPointer creates a new Pointer.
+func NewPointer[T any](v *T) *Pointer[T] {
+ var p Pointer[T]
+ if v != nil {
+ p.p.Store(v)
+ }
+ return &p
+}
+
+// Load atomically loads the wrapped value.
+func (p *Pointer[T]) Load() *T {
+ return p.p.Load()
+}
+
+// Store atomically stores the passed value.
+func (p *Pointer[T]) Store(val *T) {
+ p.p.Store(val)
+}
+
+// Swap atomically swaps the wrapped pointer and returns the old value.
+func (p *Pointer[T]) Swap(val *T) (old *T) {
+ return p.p.Swap(val)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
+ return p.p.CompareAndSwap(old, new)
+}
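
A minimal usage sketch (not part of the vendored patch) of the generic `atomic.Pointer[T]` introduced by this go.uber.org/atomic update; the `config` type and endpoint values are purely illustrative.

```go
// Illustrative only: how the new generic atomic.Pointer[T] might be used.
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// config is a hypothetical value that is published atomically.
type config struct {
	Endpoint string
}

func main() {
	p := atomic.NewPointer(&config{Endpoint: "https://old.example"})

	// Store and Load are atomic and type-safe; no unsafe.Pointer casts needed.
	p.Store(&config{Endpoint: "https://new.example"})
	fmt.Println(p.Load().Endpoint) // https://new.example

	// Swap installs a new pointer and returns the previously stored one.
	old := p.Swap(&config{Endpoint: "https://newer.example"})
	fmt.Println(old.Endpoint) // https://new.example
}
```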
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
index 225b7a2be0..c4bea70f4d 100644
--- a/vendor/go.uber.org/atomic/string.go
+++ b/vendor/go.uber.org/atomic/string.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -32,10 +32,10 @@ type String struct {
var _zeroString string
// NewString creates a new String.
-func NewString(v string) *String {
+func NewString(val string) *String {
x := &String{}
- if v != _zeroString {
- x.Store(v)
+ if val != _zeroString {
+ x.Store(val)
}
return x
}
@@ -49,6 +49,17 @@ func (x *String) Load() string {
}
// Store atomically stores the passed string.
-func (x *String) Store(v string) {
- x.v.Store(v)
+func (x *String) Store(val string) {
+ x.v.Store(val)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for string values.
+func (x *String) CompareAndSwap(old, new string) (swapped bool) {
+ return x.v.CompareAndSwap(old, new)
+}
+
+// Swap atomically stores the given string and returns the old
+// value.
+func (x *String) Swap(val string) (old string) {
+ return x.v.Swap(val).(string)
}
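
A short sketch (not part of the vendored patch) of the `CompareAndSwap` and `Swap` methods this update adds to `atomic.String`; the string values are arbitrary.

```go
// Illustrative only: the new CompareAndSwap/Swap methods on atomic.String.
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	s := atomic.NewString("hello")

	// CompareAndSwap replaces the value only when the current value equals old.
	fmt.Println(s.CompareAndSwap("hello", "world")) // true
	fmt.Println(s.CompareAndSwap("hello", "again")) // false, value is now "world"

	// Swap stores the new value and returns the previous one.
	fmt.Println(s.Swap("done")) // world
	fmt.Println(s.Load())       // done
}
```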
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
index 3a9558213d..1f63dfd5b9 100644
--- a/vendor/go.uber.org/atomic/string_ext.go
+++ b/vendor/go.uber.org/atomic/string_ext.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,7 +20,7 @@
package atomic
-//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go
// String returns the wrapped value.
func (s *String) String() string {
diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go
new file mode 100644
index 0000000000..1660feb142
--- /dev/null
+++ b/vendor/go.uber.org/atomic/time.go
@@ -0,0 +1,55 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "time"
+)
+
+// Time is an atomic type-safe wrapper for time.Time values.
+type Time struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroTime time.Time
+
+// NewTime creates a new Time.
+func NewTime(val time.Time) *Time {
+ x := &Time{}
+ if val != _zeroTime {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Time.
+func (x *Time) Load() time.Time {
+ return unpackTime(x.v.Load())
+}
+
+// Store atomically stores the passed time.Time.
+func (x *Time) Store(val time.Time) {
+ x.v.Store(packTime(val))
+}
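
A brief sketch (not part of the vendored patch) of the new `atomic.Time` wrapper, e.g. for publishing a "last seen" timestamp across goroutines; the variable name is illustrative.

```go
// Illustrative only: storing and loading a time.Time atomically.
package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

func main() {
	lastSeen := atomic.NewTime(time.Now())

	// Store and Load are safe to call concurrently without extra locking.
	lastSeen.Store(time.Now().Add(time.Minute))
	fmt.Println(lastSeen.Load().After(time.Now())) // true
}
```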
diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go
new file mode 100644
index 0000000000..1e3dc978aa
--- /dev/null
+++ b/vendor/go.uber.org/atomic/time_ext.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "time"
+
+//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go
+
+func packTime(t time.Time) interface{} {
+ return t
+}
+
+func unpackTime(v interface{}) time.Time {
+ if t, ok := v.(time.Time); ok {
+ return t
+ }
+ return time.Time{}
+}
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
index a973aba1a6..d6f04a96dc 100644
--- a/vendor/go.uber.org/atomic/uint32.go
+++ b/vendor/go.uber.org/atomic/uint32.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Uint32 struct {
}
// NewUint32 creates a new Uint32.
-func NewUint32(i uint32) *Uint32 {
- return &Uint32{v: i}
+func NewUint32(val uint32) *Uint32 {
+ return &Uint32{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Uint32) Load() uint32 {
}
// Add atomically adds to the wrapped uint32 and returns the new value.
-func (i *Uint32) Add(n uint32) uint32 {
- return atomic.AddUint32(&i.v, n)
+func (i *Uint32) Add(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, delta)
}
// Sub atomically subtracts from the wrapped uint32 and returns the new value.
-func (i *Uint32) Sub(n uint32) uint32 {
- return atomic.AddUint32(&i.v, ^(n - 1))
+func (i *Uint32) Sub(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, ^(delta - 1))
}
// Inc atomically increments the wrapped uint32 and returns the new value.
@@ -66,18 +66,25 @@ func (i *Uint32) Dec() uint32 {
}
// CAS is an atomic compare-and-swap.
-func (i *Uint32) CAS(old, new uint32) bool {
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uint32) CAS(old, new uint32) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
return atomic.CompareAndSwapUint32(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Uint32) Store(n uint32) {
- atomic.StoreUint32(&i.v, n)
+func (i *Uint32) Store(val uint32) {
+ atomic.StoreUint32(&i.v, val)
}
// Swap atomically swaps the wrapped uint32 and returns the old value.
-func (i *Uint32) Swap(n uint32) uint32 {
- return atomic.SwapUint32(&i.v, n)
+func (i *Uint32) Swap(val uint32) (old uint32) {
+ return atomic.SwapUint32(&i.v, val)
}
// MarshalJSON encodes the wrapped uint32 into JSON.
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
index 3b6c71fd5a..2574bdd5ec 100644
--- a/vendor/go.uber.org/atomic/uint64.go
+++ b/vendor/go.uber.org/atomic/uint64.go
@@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -36,8 +36,8 @@ type Uint64 struct {
}
// NewUint64 creates a new Uint64.
-func NewUint64(i uint64) *Uint64 {
- return &Uint64{v: i}
+func NewUint64(val uint64) *Uint64 {
+ return &Uint64{v: val}
}
// Load atomically loads the wrapped value.
@@ -46,13 +46,13 @@ func (i *Uint64) Load() uint64 {
}
// Add atomically adds to the wrapped uint64 and returns the new value.
-func (i *Uint64) Add(n uint64) uint64 {
- return atomic.AddUint64(&i.v, n)
+func (i *Uint64) Add(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, delta)
}
// Sub atomically subtracts from the wrapped uint64 and returns the new value.
-func (i *Uint64) Sub(n uint64) uint64 {
- return atomic.AddUint64(&i.v, ^(n - 1))
+func (i *Uint64) Sub(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(delta - 1))
}
// Inc atomically increments the wrapped uint64 and returns the new value.
@@ -66,18 +66,25 @@ func (i *Uint64) Dec() uint64 {
}
// CAS is an atomic compare-and-swap.
-func (i *Uint64) CAS(old, new uint64) bool {
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uint64) CAS(old, new uint64) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
return atomic.CompareAndSwapUint64(&i.v, old, new)
}
// Store atomically stores the passed value.
-func (i *Uint64) Store(n uint64) {
- atomic.StoreUint64(&i.v, n)
+func (i *Uint64) Store(val uint64) {
+ atomic.StoreUint64(&i.v, val)
}
// Swap atomically swaps the wrapped uint64 and returns the old value.
-func (i *Uint64) Swap(n uint64) uint64 {
- return atomic.SwapUint64(&i.v, n)
+func (i *Uint64) Swap(val uint64) (old uint64) {
+ return atomic.SwapUint64(&i.v, val)
}
// MarshalJSON encodes the wrapped uint64 into JSON.
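
A quick sketch (not part of the vendored patch) of the rename applied across the integer wrappers above: `CAS` is now a deprecated alias for `CompareAndSwap`, with identical behavior.

```go
// Illustrative only: CAS still compiles but CompareAndSwap is the preferred spelling.
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	n := atomic.NewUint64(1)

	// Preferred after this update.
	fmt.Println(n.CompareAndSwap(1, 2)) // true

	// Deprecated alias; linters and gopls will flag it.
	fmt.Println(n.CAS(2, 3)) // true

	fmt.Println(n.Load()) // 3
}
```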
diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go
new file mode 100644
index 0000000000..81b275a7ad
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uintptr.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uintptr is an atomic wrapper around uintptr.
+type Uintptr struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uintptr
+}
+
+// NewUintptr creates a new Uintptr.
+func NewUintptr(val uintptr) *Uintptr {
+ return &Uintptr{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uintptr) Load() uintptr {
+ return atomic.LoadUintptr(&i.v)
+}
+
+// Add atomically adds to the wrapped uintptr and returns the new value.
+func (i *Uintptr) Add(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uintptr and returns the new value.
+func (i *Uintptr) Sub(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uintptr and returns the new value.
+func (i *Uintptr) Inc() uintptr {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uintptr and returns the new value.
+func (i *Uintptr) Dec() uintptr {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
+ return atomic.CompareAndSwapUintptr(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uintptr) Store(val uintptr) {
+ atomic.StoreUintptr(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uintptr and returns the old value.
+func (i *Uintptr) Swap(val uintptr) (old uintptr) {
+ return atomic.SwapUintptr(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uintptr into JSON.
+func (i *Uintptr) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uintptr.
+func (i *Uintptr) UnmarshalJSON(b []byte) error {
+ var v uintptr
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uintptr) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go
new file mode 100644
index 0000000000..34868baf6a
--- /dev/null
+++ b/vendor/go.uber.org/atomic/unsafe_pointer.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2021-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// UnsafePointer is an atomic wrapper around unsafe.Pointer.
+type UnsafePointer struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v unsafe.Pointer
+}
+
+// NewUnsafePointer creates a new UnsafePointer.
+func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer {
+ return &UnsafePointer{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (p *UnsafePointer) Load() unsafe.Pointer {
+ return atomic.LoadPointer(&p.v)
+}
+
+// Store atomically stores the passed value.
+func (p *UnsafePointer) Store(val unsafe.Pointer) {
+ atomic.StorePointer(&p.v, val)
+}
+
+// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value.
+func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
+ return atomic.SwapPointer(&p.v, val)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap
+func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
+ return p.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) {
+ return atomic.CompareAndSwapPointer(&p.v, old, new)
+}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
index 671f3a3824..52caedb9a5 100644
--- a/vendor/go.uber.org/atomic/value.go
+++ b/vendor/go.uber.org/atomic/value.go
@@ -25,7 +25,7 @@ import "sync/atomic"
// Value shadows the type of the same name from sync/atomic
// https://godoc.org/sync/atomic#Value
type Value struct {
- atomic.Value
-
_ nocmp // disallow non-atomic comparison
+
+ atomic.Value
}
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
deleted file mode 100644
index 8636ab42ad..0000000000
--- a/vendor/go.uber.org/multierr/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/multierr
-
-env:
- global:
- - GO111MODULE=on
-
-go:
- - oldstable
- - stable
-
-before_install:
-- go version
-
-script:
-- |
- set -e
- make lint
- make cover
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index 6f1db9ef4a..f8177b978c 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,41 @@
Releases
========
+v1.11.0 (2023-03-28)
+====================
+- `Errors` now supports any error that implements the multiple-error
+ interface.
+- Add `Every` function to allow checking if all errors in the chain
+ satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc` that allows passing functions, similar to
+ `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
+v1.8.0 (2022-02-28)
+===================
+
+- `Combine`: perform zero allocations when there are no errors.
+
+
+v1.7.0 (2021-05-06)
+===================
+
+- Add `AppendInvoke` to append into errors from `defer` blocks.
+
+
v1.6.0 (2020-09-14)
===================
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
index 858e02475f..413e30f7ce 100644
--- a/vendor/go.uber.org/multierr/LICENSE.txt
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Uber Technologies, Inc.
+Copyright (c) 2017-2021 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
index 316004400b..dcb6fe723c 100644
--- a/vendor/go.uber.org/multierr/Makefile
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -34,9 +34,5 @@ lint: gofmt golint staticcheck
.PHONY: cover
cover:
- go test -coverprofile=cover.out -coverpkg=./... -v ./...
+ go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
go tool cover -html=cover.out -o cover.html
-
-update-license:
- @cd tools && go install go.uber.org/tools/update-license
- @$(GOBIN)/update-license $(GO_FILES)
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
index 751bd65e58..5ab6ac40f4 100644
--- a/vendor/go.uber.org/multierr/README.md
+++ b/vendor/go.uber.org/multierr/README.md
@@ -2,9 +2,29 @@
`multierr` allows combining one or more Go `error`s together.
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
## Installation
- go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```
## Status
@@ -15,9 +35,9 @@ Stable: No breaking changes will be made before 2.0.
Released under the [MIT License].
[MIT License]: LICENSE.txt
-[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
-[doc]: https://godoc.org/go.uber.org/multierr
-[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
+[doc]: https://pkg.go.dev/go.uber.org/multierr
+[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
-[ci]: https://travis-ci.com/uber-go/multierr
+[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index 5c9b67d537..3a828b2dff 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,54 +20,109 @@
// Package multierr allows combining one or more errors together.
//
-// Overview
+// # Overview
//
// Errors can be combined with the use of the Combine function.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// conn.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
//
// If only two errors are being combined, the Append function may be used
// instead.
//
-// err = multierr.Append(reader.Close(), writer.Close())
-//
-// This makes it possible to record resource cleanup failures from deferred
-// blocks with the help of named return values.
-//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer func() {
-// err = multierr.Append(err, conn.Close())
-// }()
-// // ...
-// }
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The underlying list of errors for a returned error object may be retrieved
// with the Errors function.
//
-// errors := multierr.Errors(err)
-// if len(errors) > 0 {
-// fmt.Println("The following errors occurred:", errors)
-// }
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
+//
+// # Appending from a loop
+//
+// You sometimes need to append into an error from a loop.
+//
+// var err error
+// for _, item := range items {
+// err = multierr.Append(err, process(item))
+// }
+//
+// Cases like this may require knowledge of whether an individual instance
+// failed. This usually requires introduction of a new variable.
+//
+// var err error
+// for _, item := range items {
+// if perr := process(item); perr != nil {
+// log.Warn("skipping item", item)
+// err = multierr.Append(err, perr)
+// }
+// }
+//
+// multierr includes AppendInto to simplify cases like this.
+//
+// var err error
+// for _, item := range items {
+// if multierr.AppendInto(&err, process(item)) {
+// log.Warn("skipping item", item)
+// }
+// }
+//
+// This will append the error into the err variable, and return true if that
+// individual error was non-nil.
//
-// Advanced Usage
+// See [AppendInto] for more information.
+//
+// # Deferred Functions
+//
+// Go makes it possible to modify the return value of a function in a defer
+// block if the function was using named returns. This makes it possible to
+// record resource cleanup failures from deferred blocks.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// multierr provides the Invoker type and AppendInvoke function to make cases
+// like the above simpler and obviate the need for a closure. The following is
+// roughly equivalent to the example above.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(conn))
+// // ...
+// }
+//
+// See [AppendInvoke] and [Invoker] for more information.
+//
+// NOTE: If you're modifying an error from inside a defer, you MUST use a named
+// return value for that function.
+//
+// # Advanced Usage
//
// Errors returned by Combine and Append MAY implement the following
// interface.
//
-// type errorGroup interface {
-// // Returns a slice containing the underlying list of errors.
-// //
-// // This slice MUST NOT be modified by the caller.
-// Errors() []error
-// }
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
//
// Note that if you need access to list of errors behind a multierr error, you
// should prefer using the Errors function. That said, if you need cheap
@@ -76,23 +131,23 @@
// because errors returned by Combine and Append are not guaranteed to
// implement this interface.
//
-// var errors []error
-// group, ok := err.(errorGroup)
-// if ok {
-// errors = group.Errors()
-// } else {
-// errors = []error{err}
-// }
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
package multierr // import "go.uber.org/multierr"
import (
"bytes"
+ "errors"
"fmt"
"io"
"strings"
"sync"
-
- "go.uber.org/atomic"
+ "sync/atomic"
)
var (
@@ -132,34 +187,15 @@ type errorGroup interface {
// Errors returns a slice containing zero or more errors that the supplied
// error is composed of. If the error is nil, a nil slice is returned.
//
-// err := multierr.Append(r.Close(), w.Close())
-// errors := multierr.Errors(err)
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
//
// If the error is not composed of other errors, the returned slice contains
// just the error that was passed in.
//
// Callers of this function are free to modify the returned slice.
func Errors(err error) []error {
- if err == nil {
- return nil
- }
-
- // Note that we're casting to multiError, not errorGroup. Our contract is
- // that returned errors MAY implement errorGroup. Errors, however, only
- // has special behavior for multierr-specific error objects.
- //
- // This behavior can be expanded in the future but I think it's prudent to
- // start with as little as possible in terms of contract and possibility
- // of misuse.
- eg, ok := err.(*multiError)
- if !ok {
- return []error{err}
- }
-
- errors := eg.Errors()
- result := make([]error, len(errors))
- copy(result, errors)
- return result
+ return extractErrors(err)
}
// multiError is an error that holds one or more errors.
@@ -174,8 +210,6 @@ type multiError struct {
errors []error
}
-var _ errorGroup = (*multiError)(nil)
-
// Errors returns the list of underlying errors.
//
// This slice MUST NOT be modified.
@@ -201,6 +235,17 @@ func (merr *multiError) Error() string {
return result
}
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+ for _, e := range extractErrors(err) {
+ if !errors.Is(e, target) {
+ return false
+ }
+ }
+ return true
+}
+
func (merr *multiError) Format(f fmt.State, c rune) {
if c == 'v' && f.Flag('+') {
merr.writeMultiline(f)
@@ -292,6 +337,14 @@ func inspect(errors []error) (res inspectResult) {
// fromSlice converts the given list of errors into a single error.
func fromSlice(errors []error) error {
+ // Don't pay to inspect small slices.
+ switch len(errors) {
+ case 0:
+ return nil
+ case 1:
+ return errors[0]
+ }
+
res := inspect(errors)
switch res.Count {
case 0:
@@ -301,8 +354,12 @@ func fromSlice(errors []error) error {
return errors[res.FirstErrorIdx]
case len(errors):
if !res.ContainsMultiError {
- // already flat
- return &multiError{errors: errors}
+ // Error list is flat. Make a copy of it
+ // Otherwise "errors" escapes to the heap
+ // unconditionally for all other cases.
+ // This lets us optimize for the "no errors" case.
+ out := append(([]error)(nil), errors...)
+ return &multiError{errors: out}
}
}
@@ -327,32 +384,32 @@ func fromSlice(errors []error) error {
// If zero arguments were passed or if all items are nil, a nil error is
// returned.
//
-// Combine(nil, nil) // == nil
+// Combine(nil, nil) // == nil
//
// If only a single error was passed, it is returned as-is.
//
-// Combine(err) // == err
+// Combine(err) // == err
//
// Combine skips over nil arguments so this function may be used to combine
// together errors from operations that fail independently of each other.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// pipe.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
//
// If any of the passed errors is a multierr error, it will be flattened along
// with the other errors.
//
-// multierr.Combine(multierr.Combine(err1, err2), err3)
-// // is the same as
-// multierr.Combine(err1, err2, err3)
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
//
// The returned error formats into a readable multi-line error message if
// formatted with %+v.
//
-// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
func Combine(errors ...error) error {
return fromSlice(errors)
}
@@ -362,16 +419,19 @@ func Combine(errors ...error) error {
// This function is a specialization of Combine for the common case where
// there are only two errors.
//
-// err = multierr.Append(reader.Close(), writer.Close())
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The following pattern may also be used to record failure of deferred
// operations without losing information about the original error.
//
-// func doSomething(..) (err error) {
-// f := acquireResource()
-// defer func() {
-// err = multierr.Append(err, f.Close())
-// }()
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+//
+// Note that the variable MUST be a named return to append an error to it from
+// the defer statement. See also [AppendInvoke].
func Append(left error, right error) error {
switch {
case left == nil:
@@ -401,37 +461,37 @@ func Append(left error, right error) error {
// AppendInto appends an error into the destination of an error pointer and
// returns whether the error being appended was non-nil.
//
-// var err error
-// multierr.AppendInto(&err, r.Close())
-// multierr.AppendInto(&err, w.Close())
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
//
// The above is equivalent to,
//
-// err := multierr.Append(r.Close(), w.Close())
+// err := multierr.Append(r.Close(), w.Close())
//
// As AppendInto reports whether the provided error was non-nil, it may be
// used to build a multierr error in a loop more ergonomically. For example:
//
-// var err error
-// for line := range lines {
-// var item Item
-// if multierr.AppendInto(&err, parse(line, &item)) {
-// continue
-// }
-// items = append(items, item)
-// }
-//
-// Compare this with a verison that relies solely on Append:
-//
-// var err error
-// for line := range lines {
-// var item Item
-// if parseErr := parse(line, &item); parseErr != nil {
-// err = multierr.Append(err, parseErr)
-// continue
-// }
-// items = append(items, item)
-// }
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
+//
+// Compare this with a version that relies solely on Append:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
func AppendInto(into *error, err error) (errored bool) {
if into == nil {
// We panic if 'into' is nil. This is not documented above
@@ -447,3 +507,140 @@ func AppendInto(into *error, err error) (errored bool) {
*into = Append(*into, err)
return true
}
+
+// Invoker is an operation that may fail with an error. Use it with
+// AppendInvoke to append the result of calling the function into an error.
+// This allows you to conveniently defer capture of failing operations.
+//
+// See also, [Close] and [Invoke].
+type Invoker interface {
+ Invoke() error
+}
+
+// Invoke wraps a function which may fail with an error to match the Invoker
+// interface. Use it to supply functions matching this signature to
+// AppendInvoke.
+//
+// For example,
+//
+// func processReader(r io.Reader) (err error) {
+// scanner := bufio.NewScanner(r)
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// for scanner.Scan() {
+// // ...
+// }
+// // ...
+// }
+//
+// In this example, the following line will construct the Invoker right away,
+// but defer the invocation of scanner.Err() until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+type Invoke func() error
+
+// Invoke calls the supplied function and returns its result.
+func (i Invoke) Invoke() error { return i() }
+
+// Close builds an Invoker that closes the provided io.Closer. Use it with
+// AppendInvoke to close io.Closers and append their results into an error.
+//
+// For example,
+//
+// func processFile(path string) (err error) {
+// f, err := os.Open(path)
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// return processReader(f)
+// }
+//
+// In this example, multierr.Close will construct the Invoker right away, but
+// defer the invocation of f.Close until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+func Close(closer io.Closer) Invoker {
+ return Invoke(closer.Close)
+}
+
+// AppendInvoke appends the result of calling the given Invoker into the
+// provided error pointer. Use it with named returns to safely defer
+// invocation of fallible operations until a function returns, and capture the
+// resulting errors.
+//
+// func doSomething(...) (err error) {
+// // ...
+// f, err := openFile(..)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call f.Close() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// scanner := bufio.NewScanner(f)
+// // Similarly, this schedules scanner.Err to be called and
+// // inspected when the function returns and append its error
+// // into the returned error.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// // ...
+// }
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+// err := // ...
+// multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// // ...is roughly equivalent to...
+//
+// err := // ...
+// multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is to make it easy
+// to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+// // BAD: This is likely not what the caller intended. This will evaluate
+// // foo() right away and append its result into the error when the
+// // function returns.
+// defer multierr.AppendInto(&err, foo())
+//
+// // GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See [Invoker] for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+ AppendInto(into, invoker.Invoke())
+}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+// func doSomething(...) (err error) {
+// w, err := startWorker(...)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call w.Stop() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendFunc(&err, w.Stop)
+// }
+func AppendFunc(into *error, fn func() error) {
+ AppendInvoke(into, Invoke(fn))
+}
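
A consolidated sketch (not part of the vendored patch) of the append helpers documented above — `AppendInto`, `AppendInvoke`, `Close`, `AppendFunc`, and the new `Every` — combined in one place. `readAll`, `processFile`, and the file names are hypothetical.

```go
// Illustrative only: deferred appends into a named return, plus Every.
package main

import (
	"bufio"
	"errors"
	"fmt"
	"os"

	"go.uber.org/multierr"
)

func readAll(paths []string) (err error) {
	for _, path := range paths {
		// AppendInto records the failure and reports whether one occurred.
		if multierr.AppendInto(&err, processFile(path)) {
			fmt.Println("skipping", path)
		}
	}
	return err
}

func processFile(path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// Close is appended into the named return when this function exits.
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	scanner := bufio.NewScanner(f)
	// AppendFunc wraps the function value directly, without an Invoker.
	defer multierr.AppendFunc(&err, scanner.Err)
	for scanner.Scan() {
		// ...
	}
	return err
}

func main() {
	err := readAll([]string{"a.txt", "b.txt"})
	// Every reports whether all collected errors satisfy errors.Is.
	fmt.Println(multierr.Every(err, os.ErrNotExist))
	fmt.Println(errors.Is(err, os.ErrNotExist))
}
```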
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 0000000000..a173f9c251
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
+
+type multipleErrors interface {
+ Unwrap() []error
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // check if the given err is an unwrappable error that
+ // implements the multipleErrors interface.
+ eg, ok := err.(multipleErrors)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Unwrap()...)
+}
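
A small sketch (not part of the vendored patch): on Go 1.20+ the `Unwrap() []error` method above lets the standard `errors` package see through multierr errors; on older Go the `Is`/`As` methods in the pre-1.20 file provide similar behavior.

```go
// Illustrative only: standard-library interop with multierr errors.
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"

	"go.uber.org/multierr"
)

func main() {
	err := multierr.Combine(os.ErrNotExist, fs.ErrPermission)

	// errors.Is walks the wrapped list via Unwrap() []error (Go 1.20+).
	fmt.Println(errors.Is(err, os.ErrNotExist))   // true
	fmt.Println(errors.Is(err, fs.ErrPermission)) // true

	// The wrapped errors are also recoverable as a slice.
	fmt.Println(len(multierr.Errors(err))) // 2
}
```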
diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/error_pre_go120.go
similarity index 66%
rename from vendor/go.uber.org/multierr/go113.go
rename to vendor/go.uber.org/multierr/error_pre_go120.go
index 264b0eac0d..93872a3fcd 100644
--- a/vendor/go.uber.org/multierr/go113.go
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -18,12 +18,19 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// +build go1.13
+//go:build !go1.20
+// +build !go1.20
package multierr
import "errors"
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
// As attempts to find the first error in the error list that matches the type
// of the value that target points to.
//
@@ -50,3 +57,23 @@ func (merr *multiError) Is(target error) bool {
}
return false
}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Errors()...)
+}
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
deleted file mode 100644
index 6ef084ec24..0000000000
--- a/vendor/go.uber.org/multierr/glide.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
- version: ^1
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go
new file mode 100644
index 0000000000..f4ded5fee2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go
@@ -0,0 +1,95 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
+// Function (HKDF) as defined in RFC 5869.
+//
+// HKDF is a cryptographic key derivation function (KDF) with the goal of
+// expanding limited input keying material into one or more cryptographically
+// strong secret keys.
+package hkdf // import "golang.org/x/crypto/hkdf"
+
+import (
+ "crypto/hmac"
+ "errors"
+ "hash"
+ "io"
+)
+
+// Extract generates a pseudorandom key for use with Expand from an input secret
+// and an optional independent salt.
+//
+// Only use this function if you need to reuse the extracted key with multiple
+// Expand invocations and different context values. Most common scenarios,
+// including the generation of multiple keys, should use New instead.
+func Extract(hash func() hash.Hash, secret, salt []byte) []byte {
+ if salt == nil {
+ salt = make([]byte, hash().Size())
+ }
+ extractor := hmac.New(hash, salt)
+ extractor.Write(secret)
+ return extractor.Sum(nil)
+}
+
+type hkdf struct {
+ expander hash.Hash
+ size int
+
+ info []byte
+ counter byte
+
+ prev []byte
+ buf []byte
+}
+
+func (f *hkdf) Read(p []byte) (int, error) {
+ // Check whether enough data can be generated
+ need := len(p)
+ remains := len(f.buf) + int(255-f.counter+1)*f.size
+ if remains < need {
+ return 0, errors.New("hkdf: entropy limit reached")
+ }
+ // Read any leftover from the buffer
+ n := copy(p, f.buf)
+ p = p[n:]
+
+ // Fill the rest of the buffer
+ for len(p) > 0 {
+ if f.counter > 1 {
+ f.expander.Reset()
+ }
+ f.expander.Write(f.prev)
+ f.expander.Write(f.info)
+ f.expander.Write([]byte{f.counter})
+ f.prev = f.expander.Sum(f.prev[:0])
+ f.counter++
+
+ // Copy the new batch into p
+ f.buf = f.prev
+ n = copy(p, f.buf)
+ p = p[n:]
+ }
+ // Save leftovers for next run
+ f.buf = f.buf[n:]
+
+ return need, nil
+}
+
+// Expand returns a Reader, from which keys can be read, using the given
+// pseudorandom key and optional context info, skipping the extraction step.
+//
+// The pseudorandomKey should have been generated by Extract, or be a uniformly
+// random or pseudorandom cryptographically strong key. See RFC 5869, Section
+// 3.3. Most common scenarios will want to use New instead.
+func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader {
+ expander := hmac.New(hash, pseudorandomKey)
+ return &hkdf{expander, expander.Size(), info, 1, nil, nil}
+}
+
+// New returns a Reader, from which keys can be read, using the given hash,
+// secret, salt and context info. Salt and info can be nil.
+func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
+ prk := Extract(hash, secret, salt)
+ return Expand(hash, prk, info)
+}
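
A brief sketch (not part of the vendored patch) of the `hkdf` package added above, deriving two independent keys from one secret; the secret, salt handling, and context string are illustrative.

```go
// Illustrative only: deriving keys with hkdf.New and io.ReadFull.
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := []byte("input keying material")

	// Salt is optional but recommended; info binds keys to a context.
	salt := make([]byte, sha256.Size)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	kdf := hkdf.New(sha256.New, secret, salt, []byte("example context"))

	// Successive reads from the same reader yield independent keys.
	key1 := make([]byte, 32)
	key2 := make([]byte, 32)
	if _, err := io.ReadFull(kdf, key1); err != nil {
		panic(err)
	}
	if _, err := io.ReadFull(kdf, key2); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n%x\n", key1, key2)
}
```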
diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE
new file mode 100644
index 0000000000..6a66aea5ea
--- /dev/null
+++ b/vendor/golang.org/x/mod/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS
new file mode 100644
index 0000000000..733099041f
--- /dev/null
+++ b/vendor/golang.org/x/mod/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go
new file mode 100644
index 0000000000..9a2dfd33a7
--- /dev/null
+++ b/vendor/golang.org/x/mod/semver/semver.go
@@ -0,0 +1,401 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package semver implements comparison of semantic version strings.
+// In this package, semantic version strings must begin with a leading "v",
+// as in "v1.0.0".
+//
+// The general form of a semantic version string accepted by this package is
+//
+// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
+//
+// where square brackets indicate optional parts of the syntax;
+// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
+// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
+// using only alphanumeric characters and hyphens; and
+// all-numeric PRERELEASE identifiers must not have leading zeros.
+//
+// This package follows Semantic Versioning 2.0.0 (see semver.org)
+// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
+// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
+// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
+package semver
+
+import "sort"
+
+// parsed returns the parsed form of a semantic version string.
+type parsed struct {
+ major string
+ minor string
+ patch string
+ short string
+ prerelease string
+ build string
+}
+
+// IsValid reports whether v is a valid semantic version string.
+func IsValid(v string) bool {
+ _, ok := parse(v)
+ return ok
+}
+
+// Canonical returns the canonical formatting of the semantic version v.
+// It fills in any missing .MINOR or .PATCH and discards build metadata.
+// Two semantic versions compare equal only if their canonical formattings
+// are identical strings.
+// The canonical invalid semantic version is the empty string.
+func Canonical(v string) string {
+ p, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ if p.build != "" {
+ return v[:len(v)-len(p.build)]
+ }
+ if p.short != "" {
+ return v + p.short
+ }
+ return v
+}
+
+// Major returns the major version prefix of the semantic version v.
+// For example, Major("v2.1.0") == "v2".
+// If v is an invalid semantic version string, Major returns the empty string.
+func Major(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return v[:1+len(pv.major)]
+}
+
+// MajorMinor returns the major.minor version prefix of the semantic version v.
+// For example, MajorMinor("v2.1.0") == "v2.1".
+// If v is an invalid semantic version string, MajorMinor returns the empty string.
+func MajorMinor(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ i := 1 + len(pv.major)
+ if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
+ return v[:j]
+ }
+ return v[:i] + "." + pv.minor
+}
+
+// Prerelease returns the prerelease suffix of the semantic version v.
+// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
+// If v is an invalid semantic version string, Prerelease returns the empty string.
+func Prerelease(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+ pv, ok := parse(v)
+ if !ok {
+ return ""
+ }
+ return pv.build
+}
+
+// Compare returns an integer comparing two versions according to
+// semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
+func Compare(v, w string) int {
+ pv, ok1 := parse(v)
+ pw, ok2 := parse(w)
+ if !ok1 && !ok2 {
+ return 0
+ }
+ if !ok1 {
+ return -1
+ }
+ if !ok2 {
+ return +1
+ }
+ if c := compareInt(pv.major, pw.major); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.minor, pw.minor); c != 0 {
+ return c
+ }
+ if c := compareInt(pv.patch, pw.patch); c != 0 {
+ return c
+ }
+ return comparePrerelease(pv.prerelease, pw.prerelease)
+}
+
+// Max canonicalizes its arguments and then returns the version string
+// that compares greater.
+//
+// Deprecated: use [Compare] instead. In most cases, returning a canonicalized
+// version is not expected or desired.
+func Max(v, w string) string {
+ v = Canonical(v)
+ w = Canonical(w)
+ if Compare(v, w) > 0 {
+ return v
+ }
+ return w
+}
+
+// ByVersion implements [sort.Interface] for sorting semantic version strings.
+type ByVersion []string
+
+func (vs ByVersion) Len() int { return len(vs) }
+func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
+func (vs ByVersion) Less(i, j int) bool {
+ cmp := Compare(vs[i], vs[j])
+ if cmp != 0 {
+ return cmp < 0
+ }
+ return vs[i] < vs[j]
+}
+
+// Sort sorts a list of semantic version strings using [ByVersion].
+func Sort(list []string) {
+ sort.Sort(ByVersion(list))
+}
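+
+// For illustration, Sort (equivalently sort.Sort(ByVersion(...))) orders by
+// semantic precedence and falls back to plain string order only for versions
+// that compare equal (example only):
+//
+//	vs := []string{"v1.10.0", "v1.2.0", "v1.2.0-rc.1"}
+//	Sort(vs) // vs == []string{"v1.2.0-rc.1", "v1.2.0", "v1.10.0"}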
+
+func parse(v string) (p parsed, ok bool) {
+ if v == "" || v[0] != 'v' {
+ return
+ }
+ p.major, v, ok = parseInt(v[1:])
+ if !ok {
+ return
+ }
+ if v == "" {
+ p.minor = "0"
+ p.patch = "0"
+ p.short = ".0.0"
+ return
+ }
+ if v[0] != '.' {
+ ok = false
+ return
+ }
+ p.minor, v, ok = parseInt(v[1:])
+ if !ok {
+ return
+ }
+ if v == "" {
+ p.patch = "0"
+ p.short = ".0"
+ return
+ }
+ if v[0] != '.' {
+ ok = false
+ return
+ }
+ p.patch, v, ok = parseInt(v[1:])
+ if !ok {
+ return
+ }
+ if len(v) > 0 && v[0] == '-' {
+ p.prerelease, v, ok = parsePrerelease(v)
+ if !ok {
+ return
+ }
+ }
+ if len(v) > 0 && v[0] == '+' {
+ p.build, v, ok = parseBuild(v)
+ if !ok {
+ return
+ }
+ }
+ if v != "" {
+ ok = false
+ return
+ }
+ ok = true
+ return
+}
+
+func parseInt(v string) (t, rest string, ok bool) {
+ if v == "" {
+ return
+ }
+ if v[0] < '0' || '9' < v[0] {
+ return
+ }
+ i := 1
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ if v[0] == '0' && i != 1 {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parsePrerelease(v string) (t, rest string, ok bool) {
+ // "A pre-release version MAY be denoted by appending a hyphen and
+ // a series of dot separated identifiers immediately following the patch version.
+ // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
+ // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
+ if v == "" || v[0] != '-' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) && v[i] != '+' {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i || isBadNum(v[start:i]) {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func parseBuild(v string) (t, rest string, ok bool) {
+ if v == "" || v[0] != '+' {
+ return
+ }
+ i := 1
+ start := 1
+ for i < len(v) {
+ if !isIdentChar(v[i]) && v[i] != '.' {
+ return
+ }
+ if v[i] == '.' {
+ if start == i {
+ return
+ }
+ start = i + 1
+ }
+ i++
+ }
+ if start == i {
+ return
+ }
+ return v[:i], v[i:], true
+}
+
+func isIdentChar(c byte) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
+}
+
+func isBadNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v) && i > 1 && v[0] == '0'
+}
+
+func isNum(v string) bool {
+ i := 0
+ for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+ i++
+ }
+ return i == len(v)
+}
+
+func compareInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func comparePrerelease(x, y string) int {
+ // "When major, minor, and patch are equal, a pre-release version has
+ // lower precedence than a normal version.
+ // Example: 1.0.0-alpha < 1.0.0.
+ // Precedence for two pre-release versions with the same major, minor,
+ // and patch version MUST be determined by comparing each dot separated
+ // identifier from left to right until a difference is found as follows:
+ // identifiers consisting of only digits are compared numerically and
+ // identifiers with letters or hyphens are compared lexically in ASCII
+ // sort order. Numeric identifiers always have lower precedence than
+ // non-numeric identifiers. A larger set of pre-release fields has a
+ // higher precedence than a smaller set, if all of the preceding
+ // identifiers are equal.
+ // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
+ // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
+ if x == y {
+ return 0
+ }
+ if x == "" {
+ return +1
+ }
+ if y == "" {
+ return -1
+ }
+ for x != "" && y != "" {
+ x = x[1:] // skip - or .
+ y = y[1:] // skip - or .
+ var dx, dy string
+ dx, x = nextIdent(x)
+ dy, y = nextIdent(y)
+ if dx != dy {
+ ix := isNum(dx)
+ iy := isNum(dy)
+ if ix != iy {
+ if ix {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ if ix {
+ if len(dx) < len(dy) {
+ return -1
+ }
+ if len(dx) > len(dy) {
+ return +1
+ }
+ }
+ if dx < dy {
+ return -1
+ } else {
+ return +1
+ }
+ }
+ }
+ if x == "" {
+ return -1
+ } else {
+ return +1
+ }
+}
+
+func nextIdent(x string) (dx, rest string) {
+ i := 0
+ for i < len(x) && x[i] != '.' {
+ i++
+ }
+ return x[:i], x[i:]
+}
diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go
new file mode 100644
index 0000000000..2b19c93e8e
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go
@@ -0,0 +1,660 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer
+// interface. Given the name of a (signed or unsigned) integer type T that has constants
+// defined, stringer will create a new self-contained Go source file implementing
+//
+// func (t T) String() string
+//
+// The file is created in the same package and directory as the package that defines T.
+// It has helpful defaults designed for use with go generate.
+//
+// Stringer works best with constants that are consecutive values such as created using iota,
+// but creates good code regardless. In the future it might also provide custom support for
+// constant sets that are bit patterns.
+//
+// For example, given this snippet,
+//
+// package painkiller
+//
+// type Pill int
+//
+// const (
+// Placebo Pill = iota
+// Aspirin
+// Ibuprofen
+// Paracetamol
+// Acetaminophen = Paracetamol
+// )
+//
+// running this command
+//
+// stringer -type=Pill
+//
+// in the same directory will create the file pill_string.go, in package painkiller,
+// containing a definition of
+//
+// func (Pill) String() string
+//
+// That method will translate the value of a Pill constant to the string representation
+// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will
+// print the string "Aspirin".
+//
+// Typically this process would be run using go generate, like this:
+//
+// //go:generate stringer -type=Pill
+//
+// If multiple constants have the same value, the lexically first matching name will
+// be used (in the example, Acetaminophen will print as "Paracetamol").
+//
+// With no arguments, it processes the package in the current directory.
+// Otherwise, the arguments must name a single directory holding a Go package
+// or a set of Go source files that represent a single Go package.
+//
+// The -type flag accepts a comma-separated list of types so a single run can
+// generate methods for multiple types. The default output file is t_string.go,
+// where t is the lower-cased name of the first type listed. It can be overridden
+// with the -output flag.
+//
+// The -linecomment flag tells stringer to generate the text of any line comment, trimmed
+// of leading spaces, instead of the constant name. For instance, if the constants above had a
+// Pill prefix, one could write
+//
+// PillAspirin // Aspirin
+//
+// to suppress it in the output.
+package main // import "golang.org/x/tools/cmd/stringer"
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/format"
+ "go/token"
+ "go/types"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/packages"
+)
+
+var (
+ typeNames = flag.String("type", "", "comma-separated list of type names; must be set")
+	output      = flag.String("output", "", "output file name; default srcdir/<type>_string.go")
+ trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names")
+ linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present")
+ buildTags = flag.String("tags", "", "comma-separated list of build tags to apply")
+)
+
+// Usage is a replacement usage function for the flags package.
+func Usage() {
+ fmt.Fprintf(os.Stderr, "Usage of stringer:\n")
+ fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n")
+ fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n")
+ fmt.Fprintf(os.Stderr, "For more information, see:\n")
+ fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n")
+ fmt.Fprintf(os.Stderr, "Flags:\n")
+ flag.PrintDefaults()
+}
+
+func main() {
+ log.SetFlags(0)
+ log.SetPrefix("stringer: ")
+ flag.Usage = Usage
+ flag.Parse()
+ if len(*typeNames) == 0 {
+ flag.Usage()
+ os.Exit(2)
+ }
+ types := strings.Split(*typeNames, ",")
+ var tags []string
+ if len(*buildTags) > 0 {
+ tags = strings.Split(*buildTags, ",")
+ }
+
+ // We accept either one directory or a list of files. Which do we have?
+ args := flag.Args()
+ if len(args) == 0 {
+ // Default: process whole package in current directory.
+ args = []string{"."}
+ }
+
+ // Parse the package once.
+ var dir string
+ g := Generator{
+ trimPrefix: *trimprefix,
+ lineComment: *linecomment,
+ }
+ // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc).
+ if len(args) == 1 && isDirectory(args[0]) {
+ dir = args[0]
+ } else {
+ if len(tags) != 0 {
+ log.Fatal("-tags option applies only to directories, not when files are specified")
+ }
+ dir = filepath.Dir(args[0])
+ }
+
+ g.parsePackage(args, tags)
+
+ // Print the header and package clause.
+ g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " "))
+ g.Printf("\n")
+ g.Printf("package %s", g.pkg.name)
+ g.Printf("\n")
+ g.Printf("import \"strconv\"\n") // Used by all methods.
+
+ // Run generate for each type.
+ for _, typeName := range types {
+ g.generate(typeName)
+ }
+
+ // Format the output.
+ src := g.format()
+
+ // Write to file.
+ outputName := *output
+ if outputName == "" {
+ baseName := fmt.Sprintf("%s_string.go", types[0])
+ outputName = filepath.Join(dir, strings.ToLower(baseName))
+ }
+ err := os.WriteFile(outputName, src, 0644)
+ if err != nil {
+ log.Fatalf("writing output: %s", err)
+ }
+}
+
+// isDirectory reports whether the named file is a directory.
+func isDirectory(name string) bool {
+ info, err := os.Stat(name)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return info.IsDir()
+}
+
+// Generator holds the state of the analysis. Primarily used to buffer
+// the output for format.Source.
+type Generator struct {
+ buf bytes.Buffer // Accumulated output.
+ pkg *Package // Package we are scanning.
+
+ trimPrefix string
+ lineComment bool
+
+ logf func(format string, args ...interface{}) // test logging hook; nil when not testing
+}
+
+func (g *Generator) Printf(format string, args ...interface{}) {
+ fmt.Fprintf(&g.buf, format, args...)
+}
+
+// File holds a single parsed file and associated data.
+type File struct {
+ pkg *Package // Package to which this file belongs.
+ file *ast.File // Parsed AST.
+ // These fields are reset for each type being generated.
+ typeName string // Name of the constant type.
+ values []Value // Accumulator for constant values of that type.
+
+ trimPrefix string
+ lineComment bool
+}
+
+type Package struct {
+ name string
+ defs map[*ast.Ident]types.Object
+ files []*File
+}
+
+// parsePackage analyzes the single package constructed from the patterns and tags.
+// parsePackage exits if there is an error.
+func (g *Generator) parsePackage(patterns []string, tags []string) {
+ cfg := &packages.Config{
+ Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax,
+ // TODO: Need to think about constants in test files. Maybe write type_string_test.go
+ // in a separate pass? For later.
+ Tests: false,
+ BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))},
+ Logf: g.logf,
+ }
+ pkgs, err := packages.Load(cfg, patterns...)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if len(pkgs) != 1 {
+ log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " "))
+ }
+ g.addPackage(pkgs[0])
+}
+
+// addPackage adds a type checked Package and its syntax files to the generator.
+func (g *Generator) addPackage(pkg *packages.Package) {
+ g.pkg = &Package{
+ name: pkg.Name,
+ defs: pkg.TypesInfo.Defs,
+ files: make([]*File, len(pkg.Syntax)),
+ }
+
+ for i, file := range pkg.Syntax {
+ g.pkg.files[i] = &File{
+ file: file,
+ pkg: g.pkg,
+ trimPrefix: g.trimPrefix,
+ lineComment: g.lineComment,
+ }
+ }
+}
+
+// generate produces the String method for the named type.
+func (g *Generator) generate(typeName string) {
+ values := make([]Value, 0, 100)
+ for _, file := range g.pkg.files {
+ // Set the state for this run of the walker.
+ file.typeName = typeName
+ file.values = nil
+ if file.file != nil {
+ ast.Inspect(file.file, file.genDecl)
+ values = append(values, file.values...)
+ }
+ }
+
+ if len(values) == 0 {
+ log.Fatalf("no values defined for type %s", typeName)
+ }
+ // Generate code that will fail if the constants change value.
+ g.Printf("func _() {\n")
+ g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n")
+ g.Printf("\t// Re-run the stringer command to generate them again.\n")
+ g.Printf("\tvar x [1]struct{}\n")
+ for _, v := range values {
+ g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str)
+ }
+ g.Printf("}\n")
+ runs := splitIntoRuns(values)
+ // The decision of which pattern to use depends on the number of
+ // runs in the numbers. If there's only one, it's easy. For more than
+ // one, there's a tradeoff between complexity and size of the data
+ // and code vs. the simplicity of a map. A map takes more space,
+ // but so does the code. The decision here (crossover at 10) is
+ // arbitrary, but considers that for large numbers of runs the cost
+ // of the linear scan in the switch might become important, and
+ // rather than use yet another algorithm such as binary search,
+ // we punt and use a map. In any case, the likelihood of a map
+ // being necessary for any realistic example other than bitmasks
+ // is very low. And bitmasks probably deserve their own analysis,
+ // to be done some other day.
+ switch {
+ case len(runs) == 1:
+ g.buildOneRun(runs, typeName)
+ case len(runs) <= 10:
+ g.buildMultipleRuns(runs, typeName)
+ default:
+ g.buildMap(runs, typeName)
+ }
+}
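+
+// For illustration, how the crossover above plays out for a few constant
+// sets (hypothetical values, not taken from any real package):
+//
+//	0,1,2,3           -> one run             -> buildOneRun (index into one name string)
+//	0,1,2, 10,11      -> two runs            -> buildMultipleRuns (switch over ranges)
+//	1,2,4,8,...,4096  -> many one-value runs -> buildMap (sparse, map fallback)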
+
+// splitIntoRuns breaks the values into runs of contiguous sequences.
+// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}.
+// The input slice is known to be non-empty.
+func splitIntoRuns(values []Value) [][]Value {
+ // We use stable sort so the lexically first name is chosen for equal elements.
+ sort.Stable(byValue(values))
+ // Remove duplicates. Stable sort has put the one we want to print first,
+ // so use that one. The String method won't care about which named constant
+ // was the argument, so the first name for the given value is the only one to keep.
+ // We need to do this because identical values would cause the switch or map
+ // to fail to compile.
+ j := 1
+ for i := 1; i < len(values); i++ {
+ if values[i].value != values[i-1].value {
+ values[j] = values[i]
+ j++
+ }
+ }
+ values = values[:j]
+ runs := make([][]Value, 0, 10)
+ for len(values) > 0 {
+ // One contiguous sequence per outer loop.
+ i := 1
+ for i < len(values) && values[i].value == values[i-1].value+1 {
+ i++
+ }
+ runs = append(runs, values[:i])
+ values = values[i:]
+ }
+ return runs
+}
+
+// format returns the gofmt-ed contents of the Generator's buffer.
+func (g *Generator) format() []byte {
+ src, err := format.Source(g.buf.Bytes())
+ if err != nil {
+ // Should never happen, but can arise when developing this code.
+ // The user can compile the output to see the error.
+ log.Printf("warning: internal error: invalid Go generated: %s", err)
+ log.Printf("warning: compile the package to analyze the error")
+ return g.buf.Bytes()
+ }
+ return src
+}
+
+// Value represents a declared constant.
+type Value struct {
+ originalName string // The name of the constant.
+ name string // The name with trimmed prefix.
+ // The value is stored as a bit pattern alone. The boolean tells us
+ // whether to interpret it as an int64 or a uint64; the only place
+ // this matters is when sorting.
+ // Much of the time the str field is all we need; it is printed
+ // by Value.String.
+ value uint64 // Will be converted to int64 when needed.
+ signed bool // Whether the constant is a signed type.
+ str string // The string representation given by the "go/constant" package.
+}
+
+func (v *Value) String() string {
+ return v.str
+}
+
+// byValue lets us sort the constants into increasing order.
+// We take care in the Less method to sort in signed or unsigned order,
+// as appropriate.
+type byValue []Value
+
+func (b byValue) Len() int { return len(b) }
+func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byValue) Less(i, j int) bool {
+ if b[i].signed {
+ return int64(b[i].value) < int64(b[j].value)
+ }
+ return b[i].value < b[j].value
+}
+
+// genDecl processes one declaration clause.
+func (f *File) genDecl(node ast.Node) bool {
+ decl, ok := node.(*ast.GenDecl)
+ if !ok || decl.Tok != token.CONST {
+ // We only care about const declarations.
+ return true
+ }
+ // The name of the type of the constants we are declaring.
+ // Can change if this is a multi-element declaration.
+ typ := ""
+ // Loop over the elements of the declaration. Each element is a ValueSpec:
+ // a list of names possibly followed by a type, possibly followed by values.
+ // If the type and value are both missing, we carry down the type (and value,
+ // but the "go/types" package takes care of that).
+ for _, spec := range decl.Specs {
+ vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST.
+ if vspec.Type == nil && len(vspec.Values) > 0 {
+ // "X = 1". With no type but a value. If the constant is untyped,
+ // skip this vspec and reset the remembered type.
+ typ = ""
+
+ // If this is a simple type conversion, remember the type.
+ // We don't mind if this is actually a call; a qualified call won't
+ // be matched (that will be SelectorExpr, not Ident), and only unusual
+ // situations will result in a function call that appears to be
+ // a type conversion.
+ ce, ok := vspec.Values[0].(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ id, ok := ce.Fun.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ typ = id.Name
+ }
+ if vspec.Type != nil {
+ // "X T". We have a type. Remember it.
+ ident, ok := vspec.Type.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ typ = ident.Name
+ }
+ if typ != f.typeName {
+ // This is not the type we're looking for.
+ continue
+ }
+ // We now have a list of names (from one line of source code) all being
+ // declared with the desired type.
+ // Grab their names and actual values and store them in f.values.
+ for _, name := range vspec.Names {
+ if name.Name == "_" {
+ continue
+ }
+ // This dance lets the type checker find the values for us. It's a
+ // bit tricky: look up the object declared by the name, find its
+ // types.Const, and extract its value.
+ obj, ok := f.pkg.defs[name]
+ if !ok {
+ log.Fatalf("no value for constant %s", name)
+ }
+ info := obj.Type().Underlying().(*types.Basic).Info()
+ if info&types.IsInteger == 0 {
+ log.Fatalf("can't handle non-integer constant type %s", typ)
+ }
+ value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST.
+ if value.Kind() != constant.Int {
+ log.Fatalf("can't happen: constant is not an integer %s", name)
+ }
+ i64, isInt := constant.Int64Val(value)
+ u64, isUint := constant.Uint64Val(value)
+ if !isInt && !isUint {
+ log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String())
+ }
+ if !isInt {
+ u64 = uint64(i64)
+ }
+ v := Value{
+ originalName: name.Name,
+ value: u64,
+ signed: info&types.IsUnsigned == 0,
+ str: value.String(),
+ }
+ if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 {
+ v.name = strings.TrimSpace(c.Text())
+ } else {
+ v.name = strings.TrimPrefix(v.originalName, f.trimPrefix)
+ }
+ f.values = append(f.values, v)
+ }
+ }
+ return false
+}
+
+// Helpers
+
+// usize returns the number of bits of the smallest unsigned integer
+// type that will hold n. Used to create the smallest possible slice of
+// integers to use as indexes into the concatenated strings.
+func usize(n int) int {
+ switch {
+ case n < 1<<8:
+ return 8
+ case n < 1<<16:
+ return 16
+ default:
+ // 2^32 is enough constants for anyone.
+ return 32
+ }
+}
+
+// declareIndexAndNameVars declares the index slices and concatenated names
+// strings representing the runs of values.
+func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) {
+ var indexes, names []string
+ for i, run := range runs {
+ index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i))
+ if len(run) != 1 {
+ indexes = append(indexes, index)
+ }
+ names = append(names, name)
+ }
+ g.Printf("const (\n")
+ for _, name := range names {
+ g.Printf("\t%s\n", name)
+ }
+ g.Printf(")\n\n")
+
+ if len(indexes) > 0 {
+ g.Printf("var (")
+ for _, index := range indexes {
+ g.Printf("\t%s\n", index)
+ }
+ g.Printf(")\n\n")
+ }
+}
+
+// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars
+func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) {
+ index, name := g.createIndexAndNameDecl(run, typeName, "")
+ g.Printf("const %s\n", name)
+ g.Printf("var %s\n", index)
+}
+
+// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var".
+func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) {
+ b := new(bytes.Buffer)
+ indexes := make([]int, len(run))
+ for i := range run {
+ b.WriteString(run[i].name)
+ indexes[i] = b.Len()
+ }
+ nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String())
+ nameLen := b.Len()
+ b.Reset()
+ fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen))
+ for i, v := range indexes {
+ if i > 0 {
+ fmt.Fprintf(b, ", ")
+ }
+ fmt.Fprintf(b, "%d", v)
+ }
+ fmt.Fprintf(b, "}")
+ return b.String(), nameConst
+}
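+
+// For illustration, for the Pill example from the package comment a single
+// run of Placebo, Aspirin, Ibuprofen, Paracetamol would yield declarations
+// roughly like (names and offsets are illustrative):
+//
+//	const _Pill_name = "PlaceboAspirinIbuprofenParacetamol"
+//	var _Pill_index = [...]uint8{0, 7, 14, 23, 34}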
+
+// declareNameVars declares the concatenated names string representing all the values in the runs.
+func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) {
+ g.Printf("const _%s_name%s = \"", typeName, suffix)
+ for _, run := range runs {
+ for i := range run {
+ g.Printf("%s", run[i].name)
+ }
+ }
+ g.Printf("\"\n")
+}
+
+// buildOneRun generates the variables and String method for a single run of contiguous values.
+func (g *Generator) buildOneRun(runs [][]Value, typeName string) {
+ values := runs[0]
+ g.Printf("\n")
+ g.declareIndexAndNameVar(values, typeName)
+ // The generated code is simple enough to write as a Printf format.
+ lessThanZero := ""
+ if values[0].signed {
+ lessThanZero = "i < 0 || "
+ }
+ if values[0].value == 0 { // Signed or unsigned, 0 is still 0.
+ g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero)
+ } else {
+ g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero)
+ }
+}
+
+// Arguments to format are:
+//
+// [1]: type name
+// [2]: size of index element (8 for uint8 etc.)
+// [3]: less than zero check (for signed types)
+const stringOneRun = `func (i %[1]s) String() string {
+ if %[3]si >= %[1]s(len(_%[1]s_index)-1) {
+ return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]]
+}
+`
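+
+// For illustration, for the Pill type from the package comment (a signed type
+// whose first value is 0) the template above expands to roughly:
+//
+//	func (i Pill) String() string {
+//		if i < 0 || i >= Pill(len(_Pill_index)-1) {
+//			return "Pill(" + strconv.FormatInt(int64(i), 10) + ")"
+//		}
+//		return _Pill_name[_Pill_index[i]:_Pill_index[i+1]]
+//	}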
+
+// Arguments to format are:
+// [1]: type name
+// [2]: lowest defined value for type, as a string
+// [3]: size of index element (8 for uint8 etc.)
+// [4]: less than zero check (for signed types)
+const stringOneRunWithOffset = `func (i %[1]s) String() string {
+ i -= %[2]s
+ if %[4]si >= %[1]s(len(_%[1]s_index)-1) {
+ return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")"
+ }
+ return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]]
+}
+`
+
+// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values.
+// For this pattern, a single Printf format won't do.
+func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) {
+ g.Printf("\n")
+ g.declareIndexAndNameVars(runs, typeName)
+ g.Printf("func (i %s) String() string {\n", typeName)
+ g.Printf("\tswitch {\n")
+ for i, values := range runs {
+ if len(values) == 1 {
+ g.Printf("\tcase i == %s:\n", &values[0])
+ g.Printf("\t\treturn _%s_name_%d\n", typeName, i)
+ continue
+ }
+ if values[0].value == 0 && !values[0].signed {
+ // For an unsigned lower bound of 0, "0 <= i" would be redundant.
+ g.Printf("\tcase i <= %s:\n", &values[len(values)-1])
+ } else {
+ g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1])
+ }
+ if values[0].value != 0 {
+ g.Printf("\t\ti -= %s\n", &values[0])
+ }
+ g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n",
+ typeName, i, typeName, i, typeName, i)
+ }
+ g.Printf("\tdefault:\n")
+ g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName)
+ g.Printf("\t}\n")
+ g.Printf("}\n")
+}
+
+// buildMap handles the case where the space is so sparse a map is a reasonable fallback.
+// It's a rare situation but has simple code.
+func (g *Generator) buildMap(runs [][]Value, typeName string) {
+ g.Printf("\n")
+ g.declareNameVars(runs, typeName, "")
+ g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName)
+ n := 0
+ for _, values := range runs {
+ for _, value := range values {
+ g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name))
+ n += len(value.name)
+ }
+ }
+ g.Printf("}\n\n")
+ g.Printf(stringMap, typeName)
+}
+
+// Argument to format is the type name.
+const stringMap = `func (i %[1]s) String() string {
+ if str, ok := _%[1]s_map[i]; ok {
+ return str
+ }
+ return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")"
+}
+`
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
new file mode 100644
index 0000000000..03543bd4bb
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -0,0 +1,186 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gcexportdata provides functions for locating, reading, and
+// writing export data files containing type information produced by the
+// gc compiler. This package supports go1.7 export data format and all
+// later versions.
+//
+// Although it might seem convenient for this package to live alongside
+// go/types in the standard library, this would cause version skew
+// problems for developer tools that use it, since they must be able to
+// consume the outputs of the gc compiler both before and after a Go
+// update such as from Go 1.7 to Go 1.8. Because this package lives in
+// golang.org/x/tools, sites can update their version of this repo some
+// time before the Go 1.8 release and rebuild and redeploy their
+// developer tools, which will then be able to consume both Go 1.7 and
+// Go 1.8 export data files, so they will work before and after the
+// Go update. (See discussion at https://golang.org/issue/15651.)
+package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/token"
+ "go/types"
+ "io"
+ "os/exec"
+
+ "golang.org/x/tools/internal/gcimporter"
+)
+
+// Find returns the name of an object (.o) or archive (.a) file
+// containing type information for the specified import path,
+// using the go command.
+// If no file was found, an empty filename is returned.
+//
+// A relative srcDir is interpreted relative to the current working directory.
+//
+// Find also returns the package's resolved (canonical) import path,
+// reflecting the effects of srcDir and vendoring on importPath.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
+func Find(importPath, srcDir string) (filename, path string) {
+ cmd := exec.Command("go", "list", "-json", "-export", "--", importPath)
+ cmd.Dir = srcDir
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", ""
+ }
+ var data struct {
+ ImportPath string
+ Export string
+ }
+ json.Unmarshal(out, &data)
+ return data.Export, data.ImportPath
+}
+
+// NewReader returns a reader for the export data section of an object
+// (.o) or archive (.a) file read from r. The new reader may provide
+// additional trailing data beyond the end of the export data.
+func NewReader(r io.Reader) (io.Reader, error) {
+ buf := bufio.NewReader(r)
+ _, size, err := gcimporter.FindExportData(buf)
+ if err != nil {
+ return nil, err
+ }
+
+ if size >= 0 {
+ // We were given an archive and found the __.PKGDEF in it.
+ // This tells us the size of the export data, and we don't
+ // need to return the entire file.
+ return &io.LimitedReader{
+ R: buf,
+ N: size,
+ }, nil
+ } else {
+ // We were given an object file. As such, we don't know how large
+ // the export data is and must return the entire file.
+ return buf, nil
+ }
+}
+
+// readAll works the same way as io.ReadAll, but avoids allocations and copies
+// by preallocating a byte slice of the necessary size if the size is known up
+// front. This is always possible when the input is an archive. In that case,
+// NewReader will return the known size using an io.LimitedReader.
+func readAll(r io.Reader) ([]byte, error) {
+ if lr, ok := r.(*io.LimitedReader); ok {
+ data := make([]byte, lr.N)
+ _, err := io.ReadFull(lr, data)
+ return data, err
+ }
+ return io.ReadAll(r)
+}
+
+// Read reads export data from in, decodes it, and returns type
+// information for the package.
+//
+// The package path (effectively its linker symbol prefix) is
+// specified by path, since unlike the package name, this information
+// may not be recorded in the export data.
+//
+// File position information is added to fset.
+//
+// Read may inspect and add to the imports map to ensure that references
+// within the export data to other packages are consistent. The caller
+// must ensure that imports[path] does not exist, or exists but is
+// incomplete (see types.Package.Complete), and Read inserts the
+// resulting package into this map entry.
+//
+// On return, the state of the reader is undefined.
+func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
+ data, err := readAll(in)
+ if err != nil {
+ return nil, fmt.Errorf("reading export data for %q: %v", path, err)
+ }
+
+ if bytes.HasPrefix(data, []byte("!")) {
+ return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
+ }
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 {
+ switch data[0] {
+ case 'v', 'c', 'd': // binary, till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+ case 'i': // indexed, till go1.19
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ case 'u': // unified, from go1.20
+ _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ default:
+ l := len(data)
+ if l > 10 {
+ l = 10
+ }
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path)
+ }
+ }
+ return nil, fmt.Errorf("empty export data for %s", path)
+}
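+
+// For illustration, a minimal sketch of the Find/NewReader/Read flow (error
+// handling elided; the importer in importer.go implements the same flow with
+// full error handling):
+//
+//	filename, path := gcexportdata.Find("fmt", ".")
+//	f, _ := os.Open(filename)
+//	defer f.Close()
+//	r, _ := gcexportdata.NewReader(f)
+//	fset := token.NewFileSet()
+//	pkg, err := gcexportdata.Read(r, fset, make(map[string]*types.Package), path)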
+
+// Write writes encoded type information for the specified package to out.
+// The FileSet provides file position information for named objects.
+func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ if _, err := io.WriteString(out, "i"); err != nil {
+ return err
+ }
+ return gcimporter.IExportData(out, fset, pkg)
+}
+
+// ReadBundle reads an export bundle from in, decodes it, and returns type
+// information for the packages.
+// File position information is added to fset.
+//
+// ReadBundle may inspect and add to the imports map to ensure that references
+// within the export bundle to other packages are consistent.
+//
+// On return, the state of the reader is undefined.
+//
+// Experimental: This API is experimental and may change in the future.
+func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
+ data, err := readAll(in)
+ if err != nil {
+ return nil, fmt.Errorf("reading export bundle: %v", err)
+ }
+ return gcimporter.IImportBundle(fset, imports, data)
+}
+
+// WriteBundle writes encoded type information for the specified packages to out.
+// The FileSet provides file position information for named objects.
+//
+// Experimental: This API is experimental and may change in the future.
+func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
+ return gcimporter.IExportBundle(out, fset, pkgs)
+}
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go
new file mode 100644
index 0000000000..37a7247e26
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go
@@ -0,0 +1,75 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcexportdata
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "os"
+)
+
+// NewImporter returns a new instance of the types.Importer interface
+// that reads type information from export data files written by gc.
+// The Importer also satisfies types.ImporterFrom.
+//
+// Export data files are located using "go build" workspace conventions
+// and the build.Default context.
+//
+// Use this importer instead of go/importer.For("gc", ...) to avoid the
+// version-skew problems described in the documentation of this package,
+// or to control the FileSet or access the imports map populated during
+// package loading.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
+func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
+ return importer{fset, imports}
+}
+
+type importer struct {
+ fset *token.FileSet
+ imports map[string]*types.Package
+}
+
+func (imp importer) Import(importPath string) (*types.Package, error) {
+ return imp.ImportFrom(importPath, "", 0)
+}
+
+func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
+ filename, path := Find(importPath, srcDir)
+ if filename == "" {
+ if importPath == "unsafe" {
+ // Even for unsafe, call Find first in case
+ // the package was vendored.
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %s", importPath)
+ }
+
+ if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
+ return pkg, nil // cache hit
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ f.Close()
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("reading export data: %s: %v", filename, err)
+ }
+ }()
+
+ r, err := NewReader(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return Read(r, imp.fset, imp.imports, path)
+}
diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
new file mode 100644
index 0000000000..333676b7cf
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
@@ -0,0 +1,53 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package packagesdriver fetches type sizes for go/packages and go/analysis.
+package packagesdriver
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "golang.org/x/tools/internal/gocommand"
+)
+
+func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
+ inv.Verb = "list"
+ inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
+ stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
+ var goarch, compiler string
+ if rawErr != nil {
+ rawErrMsg := rawErr.Error()
+ if strings.Contains(rawErrMsg, "cannot find main module") ||
+ strings.Contains(rawErrMsg, "go.mod file not found") {
+ // User's running outside of a module.
+ // All bets are off. Get GOARCH and guess compiler is gc.
+ // TODO(matloob): Is this a problem in practice?
+ inv.Verb = "env"
+ inv.Args = []string{"GOARCH"}
+ envout, enverr := gocmdRunner.Run(ctx, inv)
+ if enverr != nil {
+ return "", "", enverr
+ }
+ goarch = strings.TrimSpace(envout.String())
+ compiler = "gc"
+ } else if friendlyErr != nil {
+ return "", "", friendlyErr
+ } else {
+ // This should be unreachable, but be defensive
+ // in case RunRaw's error results are inconsistent.
+ return "", "", rawErr
+ }
+ } else {
+ fields := strings.Fields(stdout.String())
+ if len(fields) < 2 {
+			return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
+ stdout.String(), stderr.String())
+ }
+ goarch = fields[0]
+ compiler = fields[1]
+ }
+ return compiler, goarch, nil
+}
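+
+// For illustration, the invocation above is equivalent to running
+//
+//	go list -f "{{context.GOARCH}} {{context.Compiler}}" -- unsafe
+//
+// which typically prints something like "amd64 gc" (illustrative output); the
+// two fields are returned as (compiler, goarch), in that order.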
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 0000000000..b2a0b7c6a6
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,240 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+The [Load] function takes as input a list of patterns and returns a
+list of [Package] values describing individual packages matched by those
+patterns.
+A [Config] specifies configuration options, the most important of which is
+the [LoadMode], which controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool.
+The default build tool is the go command.
+Its supported patterns are described at
+https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns.
+
+Load may be used in Go projects that use alternative build systems, by
+installing an appropriate "driver" program for the build system and
+specifying its location in the GOPACKAGESDRIVER environment variable.
+For example,
+https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
+explains how to use the driver for Bazel.
+The driver program is responsible for interpreting patterns in its
+preferred notation and reporting information about the packages that
+they identify.
+(See driverRequest and driverResponse types for the JSON
+schema used by the protocol.
+Though the protocol is supported, these types are currently unexported;
+see #64608 for a proposal to publish them.)
+
+Regardless of driver, all patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Two query operators are currently supported: "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
+
+The Package struct provides basic information about the package, including
+
+ - ID, a unique identifier for the package in the returned set;
+ - GoFiles, the names of the package's Go source files;
+ - Imports, a map from source import strings to the Packages they name;
+ - Types, the type information for the package's exported symbols;
+ - Syntax, the parsed syntax trees for the package's source code; and
+ - TypesInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
+
+For example,
+
+ Load(nil, "bytes", "unicode...")
+
+returns four Package structs describing the standard library packages
+bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
+can match multiple packages and that a package might be matched by
+multiple patterns: in general it is not possible to determine which
+packages correspond to which patterns.
+
+Note that the list returned by Load contains only the packages matched
+by the patterns. Their dependencies can be found by walking the import
+graph using the Imports fields.
+
+The Load function can be configured by passing a pointer to a Config as
+the first argument. A nil Config is equivalent to the zero Config, which
+causes Load to run in LoadFiles mode, collecting minimal information.
+See the documentation for type Config for details.
+
+As noted earlier, the Config.Mode controls the amount of detail
+reported about the loaded packages. See the documentation for type LoadMode
+for details.
+
+Most tools should pass their command-line arguments (after any flags)
+uninterpreted to [Load], so that it can interpret them
+according to the conventions of the underlying build system.
+See the Example function for typical usage.
+*/
+package packages // import "golang.org/x/tools/go/packages"
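+
+// For illustration, a minimal Load call might look like the following
+// (mode bits and patterns are examples only; see Config and LoadMode):
+//
+//	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedSyntax | packages.NeedTypes}
+//	pkgs, err := packages.Load(cfg, "./...")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, pkg := range pkgs {
+//		fmt.Println(pkg.ID, pkg.GoFiles)
+//	}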
+
+/*
+
+Motivation and design considerations
+
+The new package's design solves problems addressed by two existing
+packages: go/build, which locates and describes packages, and
+golang.org/x/tools/go/loader, which loads, parses and type-checks them.
+The go/build.Package structure encodes too much of the 'go build' way
+of organizing projects, leaving us in need of a data type that describes a
+package of Go source code independent of the underlying build system.
+We wanted something that works equally well with go build and vgo, and
+also other build systems such as Bazel and Blaze, making it possible to
+construct analysis tools that work in all these environments.
+Tools such as errcheck and staticcheck were essentially unavailable to
+the Go community at Google, and some of Google's internal tools for Go
+are unavailable externally.
+This new package provides a uniform way to obtain package metadata by
+querying each of these build systems, optionally supporting their
+preferred command-line notations for packages, so that tools integrate
+neatly with users' build environments. The Metadata query function
+executes an external query tool appropriate to the current workspace.
+
+Loading packages always returns the complete import graph "all the way down",
+even if all you want is information about a single package, because the query
+mechanisms of all the build systems we currently support ({go,vgo} list, and
+blaze/bazel aspect-based query) cannot provide detailed information
+about one package without visiting all its dependencies too, so there is
+no additional asymptotic cost to providing transitive information.
+(This property might not be true of a hypothetical 5th build system.)
+
+In calls to TypeCheck, all initial packages, and any package that
+transitively depends on one of them, must be loaded from source.
+Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
+source; D may be loaded from export data, and E may not be loaded at all
+(though it's possible that D's export data mentions it, so a
+types.Package may be created for it and exposed.)
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+ leaving no way for a client application to see both the test
+ package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+ the library portion of the package, the dispatch of method calls in
+ the library portion was affected by the presence of the test files.
+ This should have been a clue that the packages were logically
+ different.
+3) this model of "augmentation" assumed at most one in-package test
+ per library package, which is true of projects using 'go build',
+ but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+ import the library package had to be processed before augmentation,
+ forcing a "one-shot" API and preventing the client from calling Load
+   several times in sequence as is now possible in WholeProgram mode.
+ (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Overlays: The Overlay field in the Config allows providing alternate contents
+for Go source files, by providing a mapping from file path to contents.
+go/packages will pull in new imports added in overlay files when go/packages
+is run in LoadImports mode or greater.
+Overlay support for the go list driver isn't complete yet: if the file doesn't
+exist on disk, it will only be recognized in an overlay if it is a non-test file
+and the package would be reported even without the overlay.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+ They are not portable concepts, but could be made portable.
+ Our goal has been to allow users to express themselves using the conventions
+ of the underlying build system: if the build system honors GOARCH
+ during a build and during a metadata query, then so should
+ applications built atop that query mechanism.
+ Conversely, if the target architecture of the build is determined by
+ command-line flags, the application can pass the relevant
+ flags through to the build system using a command such as:
+ myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+ However, this approach is low-level, unwieldy, and non-portable.
+ GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+ malformed patterns, existing and non-existent packages, successful and
+ failed builds, import failures, import cycles, and so on, in a call to
+ Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+ a mixture of good packages and:
+ invalid patterns
+ nonexistent packages
+ empty packages
+ packages with malformed package or import declarations
+ unreadable files
+ import cycles
+ other parse errors
+ type errors
+ Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+ Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+ order. I suspect this is due to the breadth-first resolution now used
+ by go/types. Is that a bug? Discuss with gri.
+
+*/
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 0000000000..7db1d1293a
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,101 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file enables an external tool to intercept package requests.
+// If the tool is present then its results are used in preference to
+// the go list command.
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+// The Driver Protocol
+//
+// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
+// This allows for different build systems to support go/packages by telling go/packages how the
+// packages' source is organized.
+// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
+// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
+// documentation in doc.go for the full description of the patterns that need to be supported.
+// A driver receives a JSON-serialized driverRequest struct on standard input and will
+// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.
+
+// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
+type driverRequest struct {
+ Mode LoadMode `json:"mode"`
+ // Env specifies the environment the underlying build system should be run in.
+ Env []string `json:"env"`
+ // BuildFlags are flags that should be passed to the underlying build system.
+ BuildFlags []string `json:"build_flags"`
+ // Tests specifies whether the patterns should also return test packages.
+ Tests bool `json:"tests"`
+ // Overlay maps file paths (relative to the driver's working directory) to the byte contents
+ // of overlay files.
+ Overlay map[string][]byte `json:"overlay"`
+}
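+
+// For illustration, a driver might read a request like the following on
+// standard input (all field values here are hypothetical):
+//
+//	{"mode":1,"env":["GOPATH=/home/user/go"],"build_flags":["-tags=integration"],"tests":true,"overlay":{}}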
+
+// findExternalDriver returns the file path of a tool that supplies
+// the build system package structure, or "" if not found.
+// If GOPACKAGESDRIVER is set in the environment, findExternalDriver returns its
+// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range cfg.Env {
+ if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ tool = val
+ }
+ }
+ if tool != "" && tool == "off" {
+ return nil
+ }
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ return nil
+ }
+ }
+ return func(cfg *Config, words ...string) (*driverResponse, error) {
+ req, err := json.Marshal(driverRequest{
+ Mode: cfg.Mode,
+ Env: cfg.Env,
+ BuildFlags: cfg.BuildFlags,
+ Tests: cfg.Tests,
+ Overlay: cfg.Overlay,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, tool, words...)
+ cmd.Dir = cfg.Dir
+ cmd.Env = cfg.Env
+ cmd.Stdin = bytes.NewReader(req)
+ cmd.Stdout = buf
+ cmd.Stderr = stderr
+
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
+ fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
+ }
+
+ var response driverResponse
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return &response, nil
+ }
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 0000000000..cd375fbc3c
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,1107 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ "golang.org/x/tools/go/internal/packagesdriver"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+)
+
+// debug controls verbose logging.
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+type goTooOldError struct {
+ error
+}
+
+// responseDeduper wraps a driverResponse, deduplicating its contents.
+type responseDeduper struct {
+ seenRoots map[string]bool
+ seenPackages map[string]*Package
+ dr *driverResponse
+}
+
+func newDeduper() *responseDeduper {
+ return &responseDeduper{
+ dr: &driverResponse{},
+ seenRoots: map[string]bool{},
+ seenPackages: map[string]*Package{},
+ }
+}
+
+// addAll fills in r with a driverResponse.
+func (r *responseDeduper) addAll(dr *driverResponse) {
+ for _, pkg := range dr.Packages {
+ r.addPackage(pkg)
+ }
+ for _, root := range dr.Roots {
+ r.addRoot(root)
+ }
+ r.dr.GoVersion = dr.GoVersion
+}
+
+func (r *responseDeduper) addPackage(p *Package) {
+ if r.seenPackages[p.ID] != nil {
+ return
+ }
+ r.seenPackages[p.ID] = p
+ r.dr.Packages = append(r.dr.Packages, p)
+}
+
+func (r *responseDeduper) addRoot(id string) {
+ if r.seenRoots[id] {
+ return
+ }
+ r.seenRoots[id] = true
+ r.dr.Roots = append(r.dr.Roots, id)
+}
+
+type golistState struct {
+ cfg *Config
+ ctx context.Context
+
+ envOnce sync.Once
+ goEnvError error
+ goEnv map[string]string
+
+ rootsOnce sync.Once
+ rootDirsError error
+ rootDirs map[string]string
+
+ goVersionOnce sync.Once
+ goVersionError error
+ goVersion int // The X in Go 1.X.
+
+ // vendorDirs caches the (non)existence of vendor directories.
+ vendorDirs map[string]bool
+}
+
+// getEnv returns Go environment variables. Only specific variables are
+// populated -- computing all of them is slow.
+func (state *golistState) getEnv() (map[string]string, error) {
+ state.envOnce.Do(func() {
+ var b *bytes.Buffer
+ b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH")
+ if state.goEnvError != nil {
+ return
+ }
+
+ state.goEnv = make(map[string]string)
+ decoder := json.NewDecoder(b)
+ if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil {
+ return
+ }
+ })
+ return state.goEnv, state.goEnvError
+}
+
+// mustGetEnv is a convenience function that can be used if getEnv has already succeeded.
+func (state *golistState) mustGetEnv() map[string]string {
+ env, err := state.getEnv()
+ if err != nil {
+ panic(fmt.Sprintf("mustGetEnv: %v", err))
+ }
+ return env
+}
+
+// goListDriver uses the go list command to interpret the patterns and produce
+// the build system package structure.
+// See driver for more details.
+func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
+ // Make sure that any asynchronous go commands are killed when we return.
+ parentCtx := cfg.Context
+ if parentCtx == nil {
+ parentCtx = context.Background()
+ }
+ ctx, cancel := context.WithCancel(parentCtx)
+ defer cancel()
+
+ response := newDeduper()
+
+ state := &golistState{
+ cfg: cfg,
+ ctx: ctx,
+ vendorDirs: map[string]bool{},
+ }
+
+ // Fill in response.Sizes asynchronously if necessary.
+ var sizeserr error
+ var sizeswg sync.WaitGroup
+ if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
+ sizeswg.Add(1)
+ go func() {
+ compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
+ sizeserr = err
+ response.dr.Compiler = compiler
+ response.dr.Arch = arch
+ sizeswg.Done()
+ }()
+ }
+
+ // Determine files requested in contains patterns
+ var containFiles []string
+ restPatterns := make([]string, 0, len(patterns))
+ // Extract file= and other [querytype]= patterns. Report an error if querytype
+ // doesn't exist.
+extractQueries:
+ for _, pattern := range patterns {
+ eqidx := strings.Index(pattern, "=")
+ if eqidx < 0 {
+ restPatterns = append(restPatterns, pattern)
+ } else {
+ query, value := pattern[:eqidx], pattern[eqidx+len("="):]
+ switch query {
+ case "file":
+ containFiles = append(containFiles, value)
+ case "pattern":
+ restPatterns = append(restPatterns, value)
+ case "": // not a reserved query
+ restPatterns = append(restPatterns, pattern)
+ default:
+ for _, rune := range query {
+ if rune < 'a' || rune > 'z' { // not a reserved query
+ restPatterns = append(restPatterns, pattern)
+ continue extractQueries
+ }
+ }
+ // Reject all other patterns containing "="
+ return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
+ }
+ }
+ }
+
+ // See if we have any patterns to pass through to go list. Zero initial
+ // patterns also requires a go list call, since it's the equivalent of
+ // ".".
+ if len(restPatterns) > 0 || len(patterns) == 0 {
+ dr, err := state.createDriverResponse(restPatterns...)
+ if err != nil {
+ return nil, err
+ }
+ response.addAll(dr)
+ }
+
+ if len(containFiles) != 0 {
+ if err := state.runContainsQueries(response, containFiles); err != nil {
+ return nil, err
+ }
+ }
+
+ sizeswg.Wait()
+ if sizeserr != nil {
+ return nil, sizeserr
+ }
+ return response.dr, nil
+}
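
The driver above accepts `file=` and `pattern=` query forms in addition to plain patterns. Below is a minimal sketch of exercising that behavior through the public `packages.Load` API; the file path is a made-up placeholder and the snippet assumes a module that depends on golang.org/x/tools.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	// A "file=" pattern asks the driver for the package containing that file.
	pkgs, err := packages.Load(cfg, "file=/home/user/project/main.go")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.GoFiles)
	}
}
```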
+
+func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
+ for _, query := range queries {
+ // TODO(matloob): Do only one query per directory.
+ fdir := filepath.Dir(query)
+ // Pass absolute path of directory to go list so that it knows to treat it as a directory,
+ // not a package path.
+ pattern, err := filepath.Abs(fdir)
+ if err != nil {
+ return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
+ }
+ dirResponse, err := state.createDriverResponse(pattern)
+
+ // If there was an error loading the package, or no packages are returned,
+ // or the package is returned with errors, try to load the file as an
+ // ad-hoc package.
+		// Usually the error will appear in a returned package, but it may not if we're
+		// in module mode and the ad-hoc file is located outside a module.
+ if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
+ len(dirResponse.Packages[0].Errors) == 1 {
+ var queryErr error
+ if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
+ return err // return the original error
+ }
+ }
+ isRoot := make(map[string]bool, len(dirResponse.Roots))
+ for _, root := range dirResponse.Roots {
+ isRoot[root] = true
+ }
+ for _, pkg := range dirResponse.Packages {
+ // Add any new packages to the main set
+ // We don't bother to filter packages that will be dropped by the changes of roots,
+ // that will happen anyway during graph construction outside this function.
+ // Over-reporting packages is not a problem.
+ response.addPackage(pkg)
+ // if the package was not a root one, it cannot have the file
+ if !isRoot[pkg.ID] {
+ continue
+ }
+ for _, pkgFile := range pkg.GoFiles {
+ if filepath.Base(query) == filepath.Base(pkgFile) {
+ response.addRoot(pkg.ID)
+ break
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// adhocPackage attempts to load or construct an ad-hoc package for a given
+// query, if the original call to the driver produced inadequate results.
+func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
+ response, err := state.createDriverResponse(query)
+ if err != nil {
+ return nil, err
+ }
+ // If we get nothing back from `go list`,
+ // try to make this file into its own ad-hoc package.
+ // TODO(rstambler): Should this check against the original response?
+ if len(response.Packages) == 0 {
+ response.Packages = append(response.Packages, &Package{
+ ID: "command-line-arguments",
+ PkgPath: query,
+ GoFiles: []string{query},
+ CompiledGoFiles: []string{query},
+ Imports: make(map[string]*Package),
+ })
+ response.Roots = append(response.Roots, "command-line-arguments")
+ }
+ // Handle special cases.
+ if len(response.Packages) == 1 {
+ // golang/go#33482: If this is a file= query for ad-hoc packages where
+ // the file only exists on an overlay, and exists outside of a module,
+ // add the file to the package and remove the errors.
+ if response.Packages[0].ID == "command-line-arguments" ||
+ filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) {
+ if len(response.Packages[0].GoFiles) == 0 {
+ filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
+ // TODO(matloob): check if the file is outside of a root dir?
+ for path := range state.cfg.Overlay {
+ if path == filename {
+ response.Packages[0].Errors = nil
+ response.Packages[0].GoFiles = []string{path}
+ response.Packages[0].CompiledGoFiles = []string{path}
+ }
+ }
+ }
+ }
+ }
+ return response, nil
+}
+
+// Fields must match go list;
+// see $GOROOT/src/cmd/go/internal/load/pkg.go.
+type jsonPackage struct {
+ ImportPath string
+ Dir string
+ Name string
+ Export string
+ GoFiles []string
+ CompiledGoFiles []string
+ IgnoredGoFiles []string
+ IgnoredOtherFiles []string
+ EmbedPatterns []string
+ EmbedFiles []string
+ CFiles []string
+ CgoFiles []string
+ CXXFiles []string
+ MFiles []string
+ HFiles []string
+ FFiles []string
+ SFiles []string
+ SwigFiles []string
+ SwigCXXFiles []string
+ SysoFiles []string
+ Imports []string
+ ImportMap map[string]string
+ Deps []string
+ Module *Module
+ TestGoFiles []string
+ TestImports []string
+ XTestGoFiles []string
+ XTestImports []string
+ ForTest string // q in a "p [q.test]" package, else ""
+ DepOnly bool
+
+ Error *packagesinternal.PackageError
+ DepsErrors []*packagesinternal.PackageError
+}
+
+type jsonPackageError struct {
+ ImportStack []string
+ Pos string
+ Err string
+}
+
+func otherFiles(p *jsonPackage) [][]string {
+ return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
+}
+
+// createDriverResponse uses the "go list" command to expand the pattern
+// words and return a response for the specified packages.
+func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
+ // go list uses the following identifiers in ImportPath and Imports:
+ //
+ // "p" -- importable package or main (command)
+ // "q.test" -- q's test executable
+ // "p [q.test]" -- variant of p as built for q's test executable
+ // "q_test [q.test]" -- q's external test package
+ //
+ // The packages p that are built differently for a test q.test
+ // are q itself, plus any helpers used by the external test q_test,
+ // typically including "testing" and all its dependencies.
+
+ // Run "go list" for complete
+ // information on the specified packages.
+ goVersion, err := state.getGoVersion()
+ if err != nil {
+ return nil, err
+ }
+ buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...)
+ if err != nil {
+ return nil, err
+ }
+
+ seen := make(map[string]*jsonPackage)
+ pkgs := make(map[string]*Package)
+ additionalErrors := make(map[string][]Error)
+ // Decode the JSON and convert it to Package form.
+ response := &driverResponse{
+ GoVersion: goVersion,
+ }
+ for dec := json.NewDecoder(buf); dec.More(); {
+ p := new(jsonPackage)
+ if err := dec.Decode(p); err != nil {
+ return nil, fmt.Errorf("JSON decoding failed: %v", err)
+ }
+
+ if p.ImportPath == "" {
+ // The documentation for go list says that “[e]rroneous packages will have
+ // a non-empty ImportPath”. If for some reason it comes back empty, we
+ // prefer to error out rather than silently discarding data or handing
+ // back a package without any way to refer to it.
+ if p.Error != nil {
+ return nil, Error{
+ Pos: p.Error.Pos,
+ Msg: p.Error.Err,
+ }
+ }
+ return nil, fmt.Errorf("package missing import path: %+v", p)
+ }
+
+ // Work around https://golang.org/issue/33157:
+ // go list -e, when given an absolute path, will find the package contained at
+ // that directory. But when no package exists there, it will return a fake package
+ // with an error and the ImportPath set to the absolute path provided to go list.
+ // Try to convert that absolute path to what its package path would be if it's
+ // contained in a known module or GOPATH entry. This will allow the package to be
+ // properly "reclaimed" when overlays are processed.
+ if filepath.IsAbs(p.ImportPath) && p.Error != nil {
+ pkgPath, ok, err := state.getPkgPath(p.ImportPath)
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ p.ImportPath = pkgPath
+ }
+ }
+
+ if old, found := seen[p.ImportPath]; found {
+ // If one version of the package has an error, and the other doesn't, assume
+ // that this is a case where go list is reporting a fake dependency variant
+ // of the imported package: When a package tries to invalidly import another
+ // package, go list emits a variant of the imported package (with the same
+ // import path, but with an error on it, and the package will have a
+ // DepError set on it). An example of when this can happen is for imports of
+ // main packages: main packages can not be imported, but they may be
+ // separately matched and listed by another pattern.
+ // See golang.org/issue/36188 for more details.
+
+ // The plan is that eventually, hopefully in Go 1.15, the error will be
+ // reported on the importing package rather than the duplicate "fake"
+ // version of the imported package. Once all supported versions of Go
+ // have the new behavior this logic can be deleted.
+ // TODO(matloob): delete the workaround logic once all supported versions of
+ // Go return the errors on the proper package.
+
+ // There should be exactly one version of a package that doesn't have an
+ // error.
+ if old.Error == nil && p.Error == nil {
+ if !reflect.DeepEqual(p, old) {
+ return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
+ }
+ continue
+ }
+
+ // Determine if this package's error needs to be bubbled up.
+ // This is a hack, and we expect for go list to eventually set the error
+ // on the package.
+ if old.Error != nil {
+ var errkind string
+ if strings.Contains(old.Error.Err, "not an importable package") {
+ errkind = "not an importable package"
+ } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") {
+ errkind = "use of internal package not allowed"
+ }
+ if errkind != "" {
+ if len(old.Error.ImportStack) < 1 {
+ return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind)
+ }
+ importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1]
+ if importingPkg == old.ImportPath {
+ // Using an older version of Go which put this package itself on top of import
+ // stack, instead of the importer. Look for importer in second from top
+ // position.
+ if len(old.Error.ImportStack) < 2 {
+ return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind)
+ }
+ importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2]
+ }
+ additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{
+ Pos: old.Error.Pos,
+ Msg: old.Error.Err,
+ Kind: ListError,
+ })
+ }
+ }
+
+ // Make sure that if there's a version of the package without an error,
+ // that's the one reported to the user.
+ if old.Error == nil {
+ continue
+ }
+
+ // This package will replace the old one at the end of the loop.
+ }
+ seen[p.ImportPath] = p
+
+ pkg := &Package{
+ Name: p.Name,
+ ID: p.ImportPath,
+ GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
+ CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
+ OtherFiles: absJoin(p.Dir, otherFiles(p)...),
+ EmbedFiles: absJoin(p.Dir, p.EmbedFiles),
+ EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns),
+ IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
+ forTest: p.ForTest,
+ depsErrors: p.DepsErrors,
+ Module: p.Module,
+ }
+
+ if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 {
+ if len(p.CompiledGoFiles) > len(p.GoFiles) {
+ // We need the cgo definitions, which are in the first
+ // CompiledGoFile after the non-cgo ones. This is a hack but there
+ // isn't currently a better way to find it. We also need the pure
+ // Go files and unprocessed cgo files, all of which are already
+ // in pkg.GoFiles.
+ cgoTypes := p.CompiledGoFiles[len(p.GoFiles)]
+ pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...)
+ } else {
+ // golang/go#38990: go list silently fails to do cgo processing
+ pkg.CompiledGoFiles = nil
+ pkg.Errors = append(pkg.Errors, Error{
+ Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.",
+ Kind: ListError,
+ })
+ }
+ }
+
+ // Work around https://golang.org/issue/28749:
+ // cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
+ // Remove files from CompiledGoFiles that are non-go files
+ // (or are not files that look like they are from the cache).
+ if len(pkg.CompiledGoFiles) > 0 {
+ out := pkg.CompiledGoFiles[:0]
+ for _, f := range pkg.CompiledGoFiles {
+ if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file
+ continue
+ }
+ out = append(out, f)
+ }
+ pkg.CompiledGoFiles = out
+ }
+
+ // Extract the PkgPath from the package's ID.
+ if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
+ pkg.PkgPath = pkg.ID[:i]
+ } else {
+ pkg.PkgPath = pkg.ID
+ }
+
+ if pkg.PkgPath == "unsafe" {
+ pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+ } else if len(pkg.CompiledGoFiles) == 0 {
+			// Workaround for pre-Go 1.11 versions of go list.
+ // TODO(matloob): they should be handled by the fallback.
+ // Can we delete this?
+ pkg.CompiledGoFiles = pkg.GoFiles
+ }
+
+ // Assume go list emits only absolute paths for Dir.
+ if p.Dir != "" && !filepath.IsAbs(p.Dir) {
+ log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
+ }
+
+ if p.Export != "" && !filepath.IsAbs(p.Export) {
+ pkg.ExportFile = filepath.Join(p.Dir, p.Export)
+ } else {
+ pkg.ExportFile = p.Export
+ }
+
+ // imports
+ //
+ // Imports contains the IDs of all imported packages.
+ // ImportsMap records (path, ID) only where they differ.
+ ids := make(map[string]bool)
+ for _, id := range p.Imports {
+ ids[id] = true
+ }
+ pkg.Imports = make(map[string]*Package)
+ for path, id := range p.ImportMap {
+ pkg.Imports[path] = &Package{ID: id} // non-identity import
+ delete(ids, id)
+ }
+ for id := range ids {
+ if id == "C" {
+ continue
+ }
+
+ pkg.Imports[id] = &Package{ID: id} // identity import
+ }
+ if !p.DepOnly {
+ response.Roots = append(response.Roots, pkg.ID)
+ }
+
+ // Temporary work-around for golang/go#39986. Parse filenames out of
+ // error messages. This happens if there are unrecoverable syntax
+ // errors in the source, so we can't match on a specific error message.
+ //
+ // TODO(rfindley): remove this heuristic, in favor of considering
+ // InvalidGoFiles from the list driver.
+ if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
+ addFilenameFromPos := func(pos string) bool {
+ split := strings.Split(pos, ":")
+ if len(split) < 1 {
+ return false
+ }
+ filename := strings.TrimSpace(split[0])
+ if filename == "" {
+ return false
+ }
+ if !filepath.IsAbs(filename) {
+ filename = filepath.Join(state.cfg.Dir, filename)
+ }
+ info, _ := os.Stat(filename)
+ if info == nil {
+ return false
+ }
+ pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
+ pkg.GoFiles = append(pkg.GoFiles, filename)
+ return true
+ }
+ found := addFilenameFromPos(err.Pos)
+			// In some cases, go list only reports the error position in the
+			// error text, not in the error's Pos field. One such case is when the
+			// file's package name is a keyword (see golang.org/issue/39763).
+ if !found {
+ addFilenameFromPos(err.Err)
+ }
+ }
+
+ if p.Error != nil {
+ msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
+ // Address golang.org/issue/35964 by appending import stack to error message.
+ if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
+ msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
+ }
+ pkg.Errors = append(pkg.Errors, Error{
+ Pos: p.Error.Pos,
+ Msg: msg,
+ Kind: ListError,
+ })
+ }
+
+ pkgs[pkg.ID] = pkg
+ }
+
+ for id, errs := range additionalErrors {
+ if p, ok := pkgs[id]; ok {
+ p.Errors = append(p.Errors, errs...)
+ }
+ }
+ for _, pkg := range pkgs {
+ response.Packages = append(response.Packages, pkg)
+ }
+ sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
+
+ return response, nil
+}
+
+func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
+ if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
+ return false
+ }
+
+ goV, err := state.getGoVersion()
+ if err != nil {
+ return false
+ }
+
+ // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
+ // The import stack behaves differently for these versions than newer Go versions.
+ if goV < 15 {
+ return len(p.Error.ImportStack) == 0
+ }
+
+ // On Go 1.15 and later, only parse filenames out of error if there's no import stack,
+ // or the current package is at the top of the import stack. This is not guaranteed
+ // to work perfectly, but should avoid some cases where files in errors don't belong to this
+ // package.
+ return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
+}
+
+// getGoVersion returns the effective minor version of the go command.
+func (state *golistState) getGoVersion() (int, error) {
+ state.goVersionOnce.Do(func() {
+ state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
+ })
+ return state.goVersion, state.goVersionError
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root
+// directory.
+func (state *golistState) getPkgPath(dir string) (string, bool, error) {
+ absDir, err := filepath.Abs(dir)
+ if err != nil {
+ return "", false, err
+ }
+ roots, err := state.determineRootDirs()
+ if err != nil {
+ return "", false, err
+ }
+
+ for rdir, rpath := range roots {
+ // Make sure that the directory is in the module,
+ // to avoid creating a path relative to another module.
+ if !strings.HasPrefix(absDir, rdir) {
+ continue
+ }
+ // TODO(matloob): This doesn't properly handle symlinks.
+ r, err := filepath.Rel(rdir, dir)
+ if err != nil {
+ continue
+ }
+ if rpath != "" {
+			// We choose only one root even though the directory can belong to multiple modules
+			// or GOPATH entries. This is okay because we only need to work with absolute dirs when a
+			// file is missing from disk, for instance when gopls calls go/packages in an overlay.
+			// Once the file is saved, gopls or the next invocation of the tool will get the correct
+			// result straight from go list.
+ // TODO(matloob): Implement module tiebreaking?
+ return path.Join(rpath, filepath.ToSlash(r)), true, nil
+ }
+ return filepath.ToSlash(r), true, nil
+ }
+ return "", false, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
+func absJoin(dir string, fileses ...[]string) (res []string) {
+ for _, files := range fileses {
+ for _, file := range files {
+ if !filepath.IsAbs(file) {
+ file = filepath.Join(dir, file)
+ }
+ res = append(res, file)
+ }
+ }
+ return res
+}
+
+func jsonFlag(cfg *Config, goVersion int) string {
+ if goVersion < 19 {
+ return "-json"
+ }
+ var fields []string
+ added := make(map[string]bool)
+ addFields := func(fs ...string) {
+ for _, f := range fs {
+ if !added[f] {
+ added[f] = true
+ fields = append(fields, f)
+ }
+ }
+ }
+ addFields("Name", "ImportPath", "Error") // These fields are always needed
+ if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
+ addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
+ "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
+ "SwigFiles", "SwigCXXFiles", "SysoFiles")
+ if cfg.Tests {
+ addFields("TestGoFiles", "XTestGoFiles")
+ }
+ }
+ if cfg.Mode&NeedTypes != 0 {
+ // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
+ // even when -compiled isn't passed in.
+ // TODO(#52435): Should we make the test ask for -compiled, or automatically
+ // request CompiledGoFiles in certain circumstances?
+ addFields("Dir", "CompiledGoFiles")
+ }
+ if cfg.Mode&NeedCompiledGoFiles != 0 {
+ addFields("Dir", "CompiledGoFiles", "Export")
+ }
+ if cfg.Mode&NeedImports != 0 {
+ // When imports are requested, DepOnly is used to distinguish between packages
+ // explicitly requested and transitive imports of those packages.
+ addFields("DepOnly", "Imports", "ImportMap")
+ if cfg.Tests {
+ addFields("TestImports", "XTestImports")
+ }
+ }
+ if cfg.Mode&NeedDeps != 0 {
+ addFields("DepOnly")
+ }
+ if usesExportData(cfg) {
+ // Request Dir in the unlikely case Export is not absolute.
+ addFields("Dir", "Export")
+ }
+ if cfg.Mode&needInternalForTest != 0 {
+ addFields("ForTest")
+ }
+ if cfg.Mode&needInternalDepsErrors != 0 {
+ addFields("DepsErrors")
+ }
+ if cfg.Mode&NeedModule != 0 {
+ addFields("Module")
+ }
+ if cfg.Mode&NeedEmbedFiles != 0 {
+ addFields("EmbedFiles")
+ }
+ if cfg.Mode&NeedEmbedPatterns != 0 {
+ addFields("EmbedPatterns")
+ }
+ return "-json=" + strings.Join(fields, ",")
+}
+
+func golistargs(cfg *Config, words []string, goVersion int) []string {
+ const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
+ fullargs := []string{
+ "-e", jsonFlag(cfg, goVersion),
+ fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
+ fmt.Sprintf("-test=%t", cfg.Tests),
+ fmt.Sprintf("-export=%t", usesExportData(cfg)),
+ fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
+ // go list doesn't let you pass -test and -find together,
+ // probably because you'd just get the TestMain.
+ fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)),
+ }
+
+ // golang/go#60456: with go1.21 and later, go list serves pgo variants, which
+ // can be costly to compute and may result in redundant processing for the
+ // caller. Disable these variants. If someone wants to add e.g. a NeedPGO
+ // mode flag, that should be a separate proposal.
+ if goVersion >= 21 {
+ fullargs = append(fullargs, "-pgo=off")
+ }
+
+ fullargs = append(fullargs, cfg.BuildFlags...)
+ fullargs = append(fullargs, "--")
+ fullargs = append(fullargs, words...)
+ return fullargs
+}
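
For illustration, here is a rough sketch of the kind of `go list` command line the arguments above produce for a simple NeedName|NeedFiles load on Go 1.19 or later (on Go 1.21+ `-pgo=off` is appended as well). The `./...` pattern, the abbreviated `-json` field list, and the decoding loop are examples only, not this package's actual code path.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Abbreviated field list; jsonFlag above computes the full set from the mode bits.
	cmd := exec.Command("go", "list", "-e",
		"-json=Name,ImportPath,Error,Dir,GoFiles",
		"-compiled=false", "-test=false", "-export=false", "-deps=false", "-find=true",
		"--", "./...")
	out, err := cmd.Output()
	if err != nil {
		log.Fatal(err)
	}
	// go list emits a stream of JSON objects, one per matched package.
	type listPkg struct {
		ImportPath string
		Name       string
		Dir        string
		GoFiles    []string
	}
	for dec := json.NewDecoder(bytes.NewReader(out)); dec.More(); {
		var p listPkg
		if err := dec.Decode(&p); err != nil {
			log.Fatal(err)
		}
		fmt.Println(p.ImportPath, len(p.GoFiles))
	}
}
```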
+
+// cfgInvocation returns an Invocation that reflects cfg's settings.
+func (state *golistState) cfgInvocation() gocommand.Invocation {
+ cfg := state.cfg
+ return gocommand.Invocation{
+ BuildFlags: cfg.BuildFlags,
+ ModFile: cfg.modFile,
+ ModFlag: cfg.modFlag,
+ CleanEnv: cfg.Env != nil,
+ Env: cfg.Env,
+ Logf: cfg.Logf,
+ WorkingDir: cfg.Dir,
+ }
+}
+
+// invokeGo returns the stdout of a go command invocation.
+func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
+ cfg := state.cfg
+
+ inv := state.cfgInvocation()
+
+ // For Go versions 1.16 and above, `go list` accepts overlays directly via
+ // the -overlay flag. Set it, if it's available.
+ //
+ // The check for "list" is not necessarily required, but we should avoid
+ // getting the go version if possible.
+ if verb == "list" {
+ goVersion, err := state.getGoVersion()
+ if err != nil {
+ return nil, err
+ }
+ if goVersion >= 16 {
+ filename, cleanup, err := state.writeOverlays()
+ if err != nil {
+ return nil, err
+ }
+ defer cleanup()
+ inv.Overlay = filename
+ }
+ }
+ inv.Verb = verb
+ inv.Args = args
+ gocmdRunner := cfg.gocmdRunner
+ if gocmdRunner == nil {
+ gocmdRunner = &gocommand.Runner{}
+ }
+ stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv)
+ if err != nil {
+ // Check for 'go' executable not being found.
+ if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+ return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
+ }
+
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ // Catastrophic error:
+ // - context cancellation
+ return nil, fmt.Errorf("couldn't run 'go': %w", err)
+ }
+
+ // Old go version?
+ if strings.Contains(stderr.String(), "flag provided but not defined") {
+ return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
+ }
+
+ // Related to #24854
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") {
+ return nil, friendlyErr
+ }
+
+ // Is there an error running the C compiler in cgo? This will be reported in the "Error" field
+ // and should be suppressed by go list -e.
+ //
+		// This condition is not perfect yet because the error message can include errors other than the one from runtime/cgo.
+ isPkgPathRune := func(r rune) bool {
+ // From https://golang.org/ref/spec#Import_declarations:
+ // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings
+ // using only characters belonging to Unicode's L, M, N, P, and S general categories
+ // (the Graphic characters without spaces) and may also exclude the
+ // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD.
+ return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) &&
+ !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
+ }
+ // golang/go#36770: Handle case where cmd/go prints module download messages before the error.
+ msg := stderr.String()
+ for strings.HasPrefix(msg, "go: downloading") {
+ msg = msg[strings.IndexRune(msg, '\n')+1:]
+ }
+ if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
+ msg := msg[len("# "):]
+ if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
+ return stdout, nil
+ }
+ // Treat pkg-config errors as a special case (golang.org/issue/36770).
+ if strings.HasPrefix(msg, "pkg-config") {
+ return stdout, nil
+ }
+ }
+
+ // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
+ // the error in the Err section of stdout in case -e option is provided.
+ // This fix is provided for backwards compatibility.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Similar to the previous error, but currently lacks a fix in Go.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
+ // If the package doesn't exist, put the absolute path of the directory into the error message,
+ // as Go 1.13 list does.
+ const noSuchDirectory = "no such directory"
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
+ errstr := stderr.String()
+ abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ abspath, strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+		// Note that the error message we look for in this case is different from the one looked for above.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
+ // directory outside any module.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ // TODO(matloob): command-line-arguments isn't correct here.
+ "command-line-arguments", strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Another variation of the previous error
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ // TODO(matloob): command-line-arguments isn't correct here.
+ "command-line-arguments", strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
+ // status if there's a dependency on a package that doesn't exist. But it should return
+ // a zero exit status and set an error on that package.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
+ // Don't clobber stdout if `go list` actually returned something.
+ if len(stdout.String()) > 0 {
+ return stdout, nil
+ }
+ // try to extract package name from string
+ stderrStr := stderr.String()
+ var importPath string
+ colon := strings.Index(stderrStr, ":")
+ if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
+ importPath = stderrStr[len("go build "):colon]
+ }
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ importPath, strings.Trim(stderrStr, "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Export mode entails a build.
+ // If that build fails, errors appear on stderr
+ // (despite the -e flag) and the Export field is blank.
+ // Do not fail in that case.
+ // The same is true if an ad-hoc package given to go list doesn't exist.
+ // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
+ // packages don't exist or a build fails.
+ if !usesExportData(cfg) && !containsGoFile(args) {
+ return nil, friendlyErr
+ }
+ }
+ return stdout, nil
+}
+
+// OverlayJSON is the format overlay files are expected to be in.
+// The Replace map maps from overlaid paths to replacement paths:
+// the Go command will forward all reads trying to open
+// each overlaid path to its replacement path, or consider the overlaid
+// path not to exist if the replacement path is empty.
+//
+// From golang/go#39958.
+type OverlayJSON struct {
+ Replace map[string]string `json:"replace,omitempty"`
+}
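
To make the overlay format concrete, the sketch below marshals the same shape as the exported OverlayJSON type above; the struct is redeclared locally so the snippet stands alone, and both paths are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Mirrors the exported OverlayJSON type in go/packages.
type OverlayJSON struct {
	Replace map[string]string `json:"replace,omitempty"`
}

func main() {
	b, err := json.MarshalIndent(OverlayJSON{Replace: map[string]string{
		"/home/user/project/main.go": "/tmp/gopackages-123/abc-main.go",
	}}, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// Prints the mapping go list's -overlay flag expects:
	// {
	//   "replace": {
	//     "/home/user/project/main.go": "/tmp/gopackages-123/abc-main.go"
	//   }
	// }
	fmt.Println(string(b))
}
```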
+
+// writeOverlays writes out files for go list's -overlay flag, as described
+// above.
+func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) {
+ // Do nothing if there are no overlays in the config.
+ if len(state.cfg.Overlay) == 0 {
+ return "", func() {}, nil
+ }
+ dir, err := os.MkdirTemp("", "gopackages-*")
+ if err != nil {
+ return "", nil, err
+ }
+ // The caller must clean up this directory, unless this function returns an
+ // error.
+ cleanup = func() {
+ os.RemoveAll(dir)
+ }
+ defer func() {
+ if err != nil {
+ cleanup()
+ }
+ }()
+ overlays := map[string]string{}
+ for k, v := range state.cfg.Overlay {
+ // Create a unique filename for the overlaid files, to avoid
+ // creating nested directories.
+ noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "")
+ f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator))
+ if err != nil {
+ return "", func() {}, err
+ }
+ if _, err := f.Write(v); err != nil {
+ return "", func() {}, err
+ }
+ if err := f.Close(); err != nil {
+ return "", func() {}, err
+ }
+ overlays[k] = f.Name()
+ }
+ b, err := json.Marshal(OverlayJSON{Replace: overlays})
+ if err != nil {
+ return "", func() {}, err
+ }
+ // Write out the overlay file that contains the filepath mappings.
+ filename = filepath.Join(dir, "overlay.json")
+ if err := os.WriteFile(filename, b, 0665); err != nil {
+ return "", func() {}, err
+ }
+ return filename, cleanup, nil
+}
+
+func containsGoFile(s []string) bool {
+ for _, f := range s {
+ if strings.HasSuffix(f, ".go") {
+ return true
+ }
+ }
+ return false
+}
+
+func cmdDebugStr(cmd *exec.Cmd) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.SplitN(kv, "=", 2)
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+
+ var args []string
+ for _, arg := range cmd.Args {
+ quoted := strconv.Quote(arg)
+ if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
+ args = append(args, quoted)
+ } else {
+ args = append(args, arg)
+ }
+ }
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
new file mode 100644
index 0000000000..d823c474ad
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "encoding/json"
+ "path/filepath"
+
+ "golang.org/x/tools/internal/gocommand"
+)
+
+// determineRootDirs returns a mapping from absolute directories that could
+// contain code to their corresponding import path prefixes.
+func (state *golistState) determineRootDirs() (map[string]string, error) {
+ env, err := state.getEnv()
+ if err != nil {
+ return nil, err
+ }
+ if env["GOMOD"] != "" {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
+ })
+ } else {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
+ })
+ }
+ return state.rootDirs, state.rootDirsError
+}
+
+func (state *golistState) determineRootDirsModules() (map[string]string, error) {
+	// List all of the modules; the first result is the main module, and its
+	// directory is a root. Any replaced modules will also need to be treated as roots.
+ // Editing files in the module cache isn't a great idea, so we don't
+ // plan to ever support that.
+ out, err := state.invokeGo("list", "-m", "-json", "all")
+ if err != nil {
+ // 'go list all' will fail if we're outside of a module and
+ // GO111MODULE=on. Try falling back without 'all'.
+ var innerErr error
+ out, innerErr = state.invokeGo("list", "-m", "-json")
+ if innerErr != nil {
+ return nil, err
+ }
+ }
+ roots := map[string]string{}
+ modules := map[string]string{}
+ var i int
+ for dec := json.NewDecoder(out); dec.More(); {
+ mod := new(gocommand.ModuleJSON)
+ if err := dec.Decode(mod); err != nil {
+ return nil, err
+ }
+ if mod.Dir != "" && mod.Path != "" {
+ // This is a valid module; add it to the map.
+ absDir, err := filepath.Abs(mod.Dir)
+ if err != nil {
+ return nil, err
+ }
+ modules[absDir] = mod.Path
+ // The first result is the main module.
+ if i == 0 || mod.Replace != nil && mod.Replace.Path != "" {
+ roots[absDir] = mod.Path
+ }
+ }
+ i++
+ }
+ return roots, nil
+}
+
+func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
+ m := map[string]string{}
+ for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
+ absDir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ m[filepath.Join(absDir, "src")] = ""
+ }
+ return m, nil
+}
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
new file mode 100644
index 0000000000..5c080d21b5
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -0,0 +1,57 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "fmt"
+ "strings"
+)
+
+var allModes = []LoadMode{
+ NeedName,
+ NeedFiles,
+ NeedCompiledGoFiles,
+ NeedImports,
+ NeedDeps,
+ NeedExportFile,
+ NeedTypes,
+ NeedSyntax,
+ NeedTypesInfo,
+ NeedTypesSizes,
+}
+
+var modeStrings = []string{
+ "NeedName",
+ "NeedFiles",
+ "NeedCompiledGoFiles",
+ "NeedImports",
+ "NeedDeps",
+ "NeedExportFile",
+ "NeedTypes",
+ "NeedSyntax",
+ "NeedTypesInfo",
+ "NeedTypesSizes",
+}
+
+func (mod LoadMode) String() string {
+ m := mod
+ if m == 0 {
+ return "LoadMode(0)"
+ }
+ var out []string
+ for i, x := range allModes {
+ if x > m {
+ break
+ }
+ if (m & x) != 0 {
+ out = append(out, modeStrings[i])
+ m = m ^ x
+ }
+ }
+ if m != 0 {
+ out = append(out, "Unknown")
+ }
+ return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
+}
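
A quick sketch of what the String method above prints for a combined mode value, assuming a module that imports golang.org/x/tools/go/packages:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	mode := packages.NeedName | packages.NeedFiles | packages.NeedImports
	// fmt uses the Stringer implementation above.
	fmt.Println(mode) // LoadMode(NeedName|NeedFiles|NeedImports)
}
```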
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
new file mode 100644
index 0000000000..81e9e6a727
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -0,0 +1,1347 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// See doc.go for package documentation and implementation notes.
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/go/gcexportdata"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+ "golang.org/x/tools/internal/typesinternal"
+ "golang.org/x/tools/internal/versions"
+)
+
+// A LoadMode controls the amount of detail to return when loading.
+// The bits below can be combined to specify which fields should be
+// filled in the result packages.
+// The zero value is a special case, equivalent to combining
+// the NeedName, NeedFiles, and NeedCompiledGoFiles bits.
+// ID and Errors (if present) will always be filled.
+// Load may return more information than requested.
+type LoadMode int
+
+const (
+ // NeedName adds Name and PkgPath.
+ NeedName LoadMode = 1 << iota
+
+ // NeedFiles adds GoFiles and OtherFiles.
+ NeedFiles
+
+ // NeedCompiledGoFiles adds CompiledGoFiles.
+ NeedCompiledGoFiles
+
+ // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
+ // "placeholder" Packages with only the ID set.
+ NeedImports
+
+ // NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
+ NeedDeps
+
+ // NeedExportFile adds ExportFile.
+ NeedExportFile
+
+ // NeedTypes adds Types, Fset, and IllTyped.
+ NeedTypes
+
+ // NeedSyntax adds Syntax.
+ NeedSyntax
+
+ // NeedTypesInfo adds TypesInfo.
+ NeedTypesInfo
+
+ // NeedTypesSizes adds TypesSizes.
+ NeedTypesSizes
+
+ // needInternalDepsErrors adds the internal deps errors field for use by gopls.
+ needInternalDepsErrors
+
+ // needInternalForTest adds the internal forTest field.
+	// Tests must also be set in the Config for this field to be populated.
+ needInternalForTest
+
+ // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
+ // Modifies CompiledGoFiles and Types, and has no effect on its own.
+ typecheckCgo
+
+ // NeedModule adds Module.
+ NeedModule
+
+ // NeedEmbedFiles adds EmbedFiles.
+ NeedEmbedFiles
+
+ // NeedEmbedPatterns adds EmbedPatterns.
+ NeedEmbedPatterns
+)
+
+const (
+ // Deprecated: LoadFiles exists for historical compatibility
+ // and should not be used. Please directly specify the needed fields using the Need values.
+ LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
+
+ // Deprecated: LoadImports exists for historical compatibility
+ // and should not be used. Please directly specify the needed fields using the Need values.
+ LoadImports = LoadFiles | NeedImports
+
+ // Deprecated: LoadTypes exists for historical compatibility
+ // and should not be used. Please directly specify the needed fields using the Need values.
+ LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
+
+ // Deprecated: LoadSyntax exists for historical compatibility
+ // and should not be used. Please directly specify the needed fields using the Need values.
+ LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
+
+ // Deprecated: LoadAllSyntax exists for historical compatibility
+ // and should not be used. Please directly specify the needed fields using the Need values.
+ LoadAllSyntax = LoadSyntax | NeedDeps
+
+ // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
+ NeedExportsFile = NeedExportFile
+)
+
+// A Config specifies details about how packages should be loaded.
+// The zero value is a valid configuration.
+// Calls to Load do not modify this struct.
+type Config struct {
+ // Mode controls the level of information returned for each package.
+ Mode LoadMode
+
+ // Context specifies the context for the load operation.
+ // If the context is cancelled, the loader may stop early
+ // and return an ErrCancelled error.
+ // If Context is nil, the load cannot be cancelled.
+ Context context.Context
+
+ // Logf is the logger for the config.
+ // If the user provides a logger, debug logging is enabled.
+ // If the GOPACKAGESDEBUG environment variable is set to true,
+ // but the logger is nil, default to log.Printf.
+ Logf func(format string, args ...interface{})
+
+ // Dir is the directory in which to run the build system's query tool
+ // that provides information about the packages.
+ // If Dir is empty, the tool is run in the current directory.
+ Dir string
+
+ // Env is the environment to use when invoking the build system's query tool.
+ // If Env is nil, the current environment is used.
+ // As in os/exec's Cmd, only the last value in the slice for
+ // each environment key is used. To specify the setting of only
+ // a few variables, append to the current environment, as in:
+ //
+ // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
+ //
+ Env []string
+
+ // gocmdRunner guards go command calls from concurrency errors.
+ gocmdRunner *gocommand.Runner
+
+ // BuildFlags is a list of command-line flags to be passed through to
+ // the build system's query tool.
+ BuildFlags []string
+
+ // modFile will be used for -modfile in go command invocations.
+ modFile string
+
+	// modFlag will be used for -mod in go command invocations.
+ modFlag string
+
+ // Fset provides source position information for syntax trees and types.
+	// If Fset is nil, Load will create and use a new fileset, leaving this field unchanged.
+ Fset *token.FileSet
+
+ // ParseFile is called to read and parse each file
+ // when preparing a package's type-checked syntax tree.
+ // It must be safe to call ParseFile simultaneously from multiple goroutines.
+	// If ParseFile is nil, the loader will use parser.ParseFile.
+ //
+ // ParseFile should parse the source from src and use filename only for
+ // recording position information.
+ //
+ // An application may supply a custom implementation of ParseFile
+ // to change the effective file contents or the behavior of the parser,
+ // or to modify the syntax tree. For example, selectively eliminating
+ // unwanted function bodies can significantly accelerate type checking.
+ ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+ // If Tests is set, the loader includes not just the packages
+ // matching a particular pattern but also any related test packages,
+ // including test-only variants of the package and the test executable.
+ //
+ // For example, when using the go command, loading "fmt" with Tests=true
+ // returns four packages, with IDs "fmt" (the standard package),
+ // "fmt [fmt.test]" (the package as compiled for the test),
+ // "fmt_test" (the test functions from source files in package fmt_test),
+ // and "fmt.test" (the test binary).
+ //
+ // In build systems with explicit names for tests,
+ // setting Tests may have no effect.
+ Tests bool
+
+ // Overlay provides a mapping of absolute file paths to file contents.
+ // If the file with the given path already exists, the parser will use the
+ // alternative file contents provided by the map.
+ //
+ // Overlays provide incomplete support for when a given file doesn't
+ // already exist on disk. See the package doc above for more details.
+ Overlay map[string][]byte
+}
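
As a concrete illustration of the fields documented above, here is a hypothetical configuration; the directory, environment additions, and overlay contents are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode:  packages.NeedName | packages.NeedFiles | packages.NeedImports,
		Dir:   "/home/user/project", // run the query tool in this directory
		Env:   append(os.Environ(), "GOFLAGS=-mod=vendor"), // extend, not replace, the environment
		Tests: true, // also load test variants of the matched packages
		// Overlay supplies unsaved contents for a file; the parser uses these
		// bytes instead of whatever is (or is not) on disk.
		Overlay: map[string][]byte{
			"/home/user/project/extra.go": []byte("package main\n"),
		},
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID)
	}
}
```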
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
+
+// driverResponse contains the results for a driver query.
+type driverResponse struct {
+ // NotHandled is returned if the request can't be handled by the current
+ // driver. If an external driver returns a response with NotHandled, the
+	// rest of the driverResponse is ignored, and go/packages will fall back
+ // to the next driver. If go/packages is extended in the future to support
+ // lists of multiple drivers, go/packages will fall back to the next driver.
+ NotHandled bool
+
+	// Compiler and Arch are the arguments to pass to types.SizesFor
+ // to get a types.Sizes to use when type checking.
+ Compiler string
+ Arch string
+
+ // Roots is the set of package IDs that make up the root packages.
+ // We have to encode this separately because when we encode a single package
+ // we cannot know if it is one of the roots as that requires knowledge of the
+ // graph it is part of.
+ Roots []string `json:",omitempty"`
+
+ // Packages is the full set of packages in the graph.
+ // The packages are not connected into a graph.
+	// The Imports, if populated, will be stubs that only have their ID set.
+ // Imports will be connected and then type and syntax information added in a
+ // later pass (see refine).
+ Packages []*Package
+
+ // GoVersion is the minor version number used by the driver
+ // (e.g. the go command on the PATH) when selecting .go files.
+ // Zero means unknown.
+ GoVersion int
+}
+
+// Load loads and returns the Go packages named by the given patterns.
+//
+// Config specifies loading options;
+// nil behaves the same as an empty Config.
+//
+// Load returns an error if any of the patterns was invalid
+// as defined by the underlying build system.
+// It may return an empty list of packages without an error,
+// for instance for an empty expansion of a valid wildcard.
+// Errors associated with a particular package are recorded in the
+// corresponding Package's Errors list, and do not cause Load to
+// return an error. Clients may need to handle such errors before
+// proceeding with further analysis. The PrintErrors function is
+// provided for convenient display of all errors.
+func Load(cfg *Config, patterns ...string) ([]*Package, error) {
+ ld := newLoader(cfg)
+ response, external, err := defaultDriver(&ld.Config, patterns...)
+ if err != nil {
+ return nil, err
+ }
+
+ ld.sizes = types.SizesFor(response.Compiler, response.Arch)
+ if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 {
+ // Type size information is needed but unavailable.
+ if external {
+ // An external driver may fail to populate the Compiler/GOARCH fields,
+ // especially since they are relatively new (see #63700).
+ // Provide a sensible fallback in this case.
+ ld.sizes = types.SizesFor("gc", runtime.GOARCH)
+ if ld.sizes == nil { // gccgo-only arch
+ ld.sizes = types.SizesFor("gc", "amd64")
+ }
+ } else {
+ // Go list should never fail to deliver accurate size information.
+ // Reject the whole Load since the error is the same for every package.
+ return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q",
+ response.Compiler, response.Arch)
+ }
+ }
+
+ return ld.refine(response)
+}
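
A minimal sketch of the error-handling contract described above: Load only fails outright for invalid patterns or a broken driver, while per-package problems land in each Package's Errors list and can be surfaced with the exported PrintErrors helper. The pattern is a placeholder.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedSyntax | packages.NeedTypes}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err) // invalid pattern, driver failure, etc.
	}
	// Parse, type, and list errors do not make Load fail; PrintErrors writes
	// them to stderr and reports how many it printed.
	if n := packages.PrintErrors(pkgs); n > 0 {
		os.Exit(1)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.PkgPath, len(pkg.Syntax), "files parsed")
	}
}
```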
+
+// defaultDriver is a driver that implements go/packages' fallback behavior.
+// It will first try to send the request to an external driver, if one exists. If there's
+// no external driver, or the driver returns a response with NotHandled set,
+// defaultDriver will fall back to the go list driver.
+// The boolean result indicates that an external driver handled the request.
+func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) {
+ if driver := findExternalDriver(cfg); driver != nil {
+ response, err := driver(cfg, patterns...)
+ if err != nil {
+ return nil, false, err
+ } else if !response.NotHandled {
+ return response, true, nil
+ }
+ // (fall through)
+ }
+
+ response, err := goListDriver(cfg, patterns...)
+ return response, false, err
+}
+
+// A Package describes a loaded Go package.
+type Package struct {
+ // ID is a unique identifier for a package,
+ // in a syntax provided by the underlying build system.
+ //
+ // Because the syntax varies based on the build system,
+ // clients should treat IDs as opaque and not attempt to
+ // interpret them.
+ ID string
+
+ // Name is the package name as it appears in the package source code.
+ Name string
+
+ // PkgPath is the package path as used by the go/types package.
+ PkgPath string
+
+ // Errors contains any errors encountered querying the metadata
+ // of the package, or while parsing or type-checking its files.
+ Errors []Error
+
+ // TypeErrors contains the subset of errors produced during type checking.
+ TypeErrors []types.Error
+
+ // GoFiles lists the absolute file paths of the package's Go source files.
+ // It may include files that should not be compiled, for example because
+ // they contain non-matching build tags, are documentary pseudo-files such as
+ // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing.
+ GoFiles []string
+
+ // CompiledGoFiles lists the absolute file paths of the package's source
+ // files that are suitable for type checking.
+ // This may differ from GoFiles if files are processed before compilation.
+ CompiledGoFiles []string
+
+ // OtherFiles lists the absolute file paths of the package's non-Go source files,
+ // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
+ OtherFiles []string
+
+ // EmbedFiles lists the absolute file paths of the package's files
+ // embedded with go:embed.
+ EmbedFiles []string
+
+ // EmbedPatterns lists the absolute file patterns of the package's
+ // files embedded with go:embed.
+ EmbedPatterns []string
+
+ // IgnoredFiles lists source files that are not part of the package
+ // using the current build configuration but that might be part of
+ // the package using other build configurations.
+ IgnoredFiles []string
+
+ // ExportFile is the absolute path to a file containing type
+ // information for the package as provided by the build system.
+ ExportFile string
+
+ // Imports maps import paths appearing in the package's Go source files
+ // to corresponding loaded Packages.
+ Imports map[string]*Package
+
+ // Types provides type information for the package.
+ // The NeedTypes LoadMode bit sets this field for packages matching the
+ // patterns; type information for dependencies may be missing or incomplete,
+ // unless NeedDeps and NeedImports are also set.
+ Types *types.Package
+
+ // Fset provides position information for Types, TypesInfo, and Syntax.
+ // It is set only when Types is set.
+ Fset *token.FileSet
+
+ // IllTyped indicates whether the package or any dependency contains errors.
+ // It is set only when Types is set.
+ IllTyped bool
+
+ // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
+ //
+ // The NeedSyntax LoadMode bit populates this field for packages matching the patterns.
+ // If NeedDeps and NeedImports are also set, this field will also be populated
+ // for dependencies.
+ //
+ // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are
+ // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles.
+ Syntax []*ast.File
+
+ // TypesInfo provides type information about the package's syntax trees.
+ // It is set only when Syntax is set.
+ TypesInfo *types.Info
+
+ // TypesSizes provides the effective size function for types in TypesInfo.
+ TypesSizes types.Sizes
+
+ // forTest is the package under test, if any.
+ forTest string
+
+ // depsErrors is the DepsErrors field from the go list response, if any.
+ depsErrors []*packagesinternal.PackageError
+
+	// Module is the module information for the package, if it exists.
+ Module *Module
+}
+
+// Module provides module information for a package.
+type Module struct {
+ Path string // module path
+ Version string // module version
+ Replace *Module // replaced by this module
+ Time *time.Time // time version was created
+ Main bool // is this the main module?
+ Indirect bool // is this module only an indirect dependency of main module?
+ Dir string // directory holding files for this module, if any
+ GoMod string // path to go.mod file used when loading this module, if any
+ GoVersion string // go version used in module
+ Error *ModuleError // error loading module
+}
+
+// ModuleError holds errors loading a module.
+type ModuleError struct {
+ Err string // the error itself
+}
+
+func init() {
+ packagesinternal.GetForTest = func(p interface{}) string {
+ return p.(*Package).forTest
+ }
+ packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
+ return p.(*Package).depsErrors
+ }
+ packagesinternal.SetModFile = func(config interface{}, value string) {
+ config.(*Config).modFile = value
+ }
+ packagesinternal.SetModFlag = func(config interface{}, value string) {
+ config.(*Config).modFlag = value
+ }
+ packagesinternal.TypecheckCgo = int(typecheckCgo)
+ packagesinternal.DepsErrors = int(needInternalDepsErrors)
+ packagesinternal.ForTest = int(needInternalForTest)
+}
+
+// An Error describes a problem with a package's metadata, syntax, or types.
+type Error struct {
+ Pos string // "file:line:col" or "file:line" or "" or "-"
+ Msg string
+ Kind ErrorKind
+}
+
+// ErrorKind describes the source of the error, allowing the user to
+// differentiate between errors generated by the driver, the parser, or the
+// type-checker.
+type ErrorKind int
+
+const (
+ UnknownError ErrorKind = iota
+ ListError
+ ParseError
+ TypeError
+)
+
+func (err Error) Error() string {
+ pos := err.Pos
+ if pos == "" {
+ pos = "-" // like token.Position{}.String()
+ }
+ return pos + ": " + err.Msg
+}
+
+// flatPackage is the JSON form of Package.
+// It drops all the type and syntax fields, and transforms the Imports into
+// a map from import path to package ID.
+//
+// TODO(adonovan): identify this struct with Package, effectively
+// publishing the JSON protocol.
+type flatPackage struct {
+ ID string
+ Name string `json:",omitempty"`
+ PkgPath string `json:",omitempty"`
+ Errors []Error `json:",omitempty"`
+ GoFiles []string `json:",omitempty"`
+ CompiledGoFiles []string `json:",omitempty"`
+ OtherFiles []string `json:",omitempty"`
+ EmbedFiles []string `json:",omitempty"`
+ EmbedPatterns []string `json:",omitempty"`
+ IgnoredFiles []string `json:",omitempty"`
+ ExportFile string `json:",omitempty"`
+ Imports map[string]string `json:",omitempty"`
+}
+
+// MarshalJSON returns the Package in its JSON form.
+// For the most part, the structure fields are written out unmodified, and
+// the type and syntax fields are skipped.
+// The imports are written out as just a map of path to package id.
+// The errors are written using a custom type that tries to preserve the
+// structure of error types we know about.
+//
+// This method exists to enable support for additional build systems. It is
+// not intended for use by clients of the API and we may change the format.
+func (p *Package) MarshalJSON() ([]byte, error) {
+ flat := &flatPackage{
+ ID: p.ID,
+ Name: p.Name,
+ PkgPath: p.PkgPath,
+ Errors: p.Errors,
+ GoFiles: p.GoFiles,
+ CompiledGoFiles: p.CompiledGoFiles,
+ OtherFiles: p.OtherFiles,
+ EmbedFiles: p.EmbedFiles,
+ EmbedPatterns: p.EmbedPatterns,
+ IgnoredFiles: p.IgnoredFiles,
+ ExportFile: p.ExportFile,
+ }
+ if len(p.Imports) > 0 {
+ flat.Imports = make(map[string]string, len(p.Imports))
+ for path, ipkg := range p.Imports {
+ flat.Imports[path] = ipkg.ID
+ }
+ }
+ return json.Marshal(flat)
+}
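+
+// An illustrative sketch (the package values below are hypothetical): a
+// package with one import marshals to the flat form, with Imports reduced to
+// a map of import path to package ID and the type/syntax fields dropped.
+//
+//	p := &Package{
+//		ID:      "example.com/m/a",
+//		Name:    "a",
+//		PkgPath: "example.com/m/a",
+//		Imports: map[string]*Package{"fmt": {ID: "fmt"}},
+//	}
+//	b, _ := p.MarshalJSON()
+//	// string(b) == `{"ID":"example.com/m/a","Name":"a","PkgPath":"example.com/m/a","Imports":{"fmt":"fmt"}}`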
+
+// UnmarshalJSON reads in a Package from its JSON format.
+// See MarshalJSON for details about the format accepted.
+func (p *Package) UnmarshalJSON(b []byte) error {
+ flat := &flatPackage{}
+ if err := json.Unmarshal(b, &flat); err != nil {
+ return err
+ }
+ *p = Package{
+ ID: flat.ID,
+ Name: flat.Name,
+ PkgPath: flat.PkgPath,
+ Errors: flat.Errors,
+ GoFiles: flat.GoFiles,
+ CompiledGoFiles: flat.CompiledGoFiles,
+ OtherFiles: flat.OtherFiles,
+ EmbedFiles: flat.EmbedFiles,
+ EmbedPatterns: flat.EmbedPatterns,
+ ExportFile: flat.ExportFile,
+ }
+ if len(flat.Imports) > 0 {
+ p.Imports = make(map[string]*Package, len(flat.Imports))
+ for path, id := range flat.Imports {
+ p.Imports[path] = &Package{ID: id}
+ }
+ }
+ return nil
+}
+
+func (p *Package) String() string { return p.ID }
+
+// loaderPackage augments Package with state used during the loading phase
+type loaderPackage struct {
+ *Package
+ importErrors map[string]error // maps each bad import to its error
+ loadOnce sync.Once
+ color uint8 // for cycle detection
+ needsrc bool // load from source (Mode >= LoadTypes)
+ needtypes bool // type information is either requested or depended on
+ initial bool // package was matched by a pattern
+ goVersion int // minor version number of go command on PATH
+}
+
+// loader holds the working state of a single call to load.
+type loader struct {
+ pkgs map[string]*loaderPackage
+ Config
+ sizes types.Sizes // non-nil if needed by mode
+ parseCache map[string]*parseValue
+ parseCacheMu sync.Mutex
+ exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
+
+	// Config.Mode contains the implied mode (see impliedLoadMode).
+	// The implied mode contains all the fields we need data for.
+	// requestedMode holds the fields the caller actually requested;
+	// unrequested fields are zeroed out before packages are returned
+	// to the user. This makes it easier to get the conditions for
+	// each mode right.
+ requestedMode LoadMode
+}
+
+type parseValue struct {
+ f *ast.File
+ err error
+ ready chan struct{}
+}
+
+func newLoader(cfg *Config) *loader {
+ ld := &loader{
+ parseCache: map[string]*parseValue{},
+ }
+ if cfg != nil {
+ ld.Config = *cfg
+ // If the user has provided a logger, use it.
+ ld.Config.Logf = cfg.Logf
+ }
+ if ld.Config.Logf == nil {
+ // If the GOPACKAGESDEBUG environment variable is set to true,
+ // but the user has not provided a logger, default to log.Printf.
+ if debug {
+ ld.Config.Logf = log.Printf
+ } else {
+ ld.Config.Logf = func(format string, args ...interface{}) {}
+ }
+ }
+ if ld.Config.Mode == 0 {
+ ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
+ }
+ if ld.Config.Env == nil {
+ ld.Config.Env = os.Environ()
+ }
+ if ld.Config.gocmdRunner == nil {
+ ld.Config.gocmdRunner = &gocommand.Runner{}
+ }
+ if ld.Context == nil {
+ ld.Context = context.Background()
+ }
+ if ld.Dir == "" {
+ if dir, err := os.Getwd(); err == nil {
+ ld.Dir = dir
+ }
+ }
+
+	// Save the mode the caller actually requested; unrequested fields are
+	// zeroed out before packages are returned to the user.
+ ld.requestedMode = ld.Mode
+ ld.Mode = impliedLoadMode(ld.Mode)
+
+ if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
+ if ld.Fset == nil {
+ ld.Fset = token.NewFileSet()
+ }
+
+ // ParseFile is required even in LoadTypes mode
+ // because we load source if export data is missing.
+ if ld.ParseFile == nil {
+ ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+ const mode = parser.AllErrors | parser.ParseComments
+ return parser.ParseFile(fset, filename, src, mode)
+ }
+ }
+ }
+
+ return ld
+}
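+
+// A minimal sketch of how the defaults above apply to a caller-supplied
+// Config (the zero values here are deliberate; nothing below is specific to
+// any real project):
+//
+//	ld := newLoader(&Config{}) // zero Mode, Env, Dir, Context
+//	// ld.Mode    == NeedName|NeedFiles|NeedCompiledGoFiles (legacy default)
+//	// ld.Env     == os.Environ()
+//	// ld.Dir     == current working directory
+//	// ld.Context == context.Background()
+//	// ld.Logf discards output unless GOPACKAGESDEBUG is set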
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
+ roots := response.Roots
+ rootMap := make(map[string]int, len(roots))
+ for i, root := range roots {
+ rootMap[root] = i
+ }
+ ld.pkgs = make(map[string]*loaderPackage)
+ // first pass, fixup and build the map and roots
+ var initial = make([]*loaderPackage, len(roots))
+ for _, pkg := range response.Packages {
+ rootIndex := -1
+ if i, found := rootMap[pkg.ID]; found {
+ rootIndex = i
+ }
+
+ // Overlays can invalidate export data.
+ // TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+ exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+ // This package needs type information if the caller requested types and the package is
+ // either a root, or it's a non-root and the user requested dependencies ...
+		needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+		// This package needs source if the caller requested source (or types info, which implies source)
+		// and the package is either a root, or it's a non-root and the user requested dependencies ...
+ needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+ // ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+ // typechecking packages from source if they fail to compile.
+ (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
+ lpkg := &loaderPackage{
+ Package: pkg,
+ needtypes: needtypes,
+ needsrc: needsrc,
+ goVersion: response.GoVersion,
+ }
+ ld.pkgs[lpkg.ID] = lpkg
+ if rootIndex >= 0 {
+ initial[rootIndex] = lpkg
+ lpkg.initial = true
+ }
+ }
+ for i, root := range roots {
+ if initial[i] == nil {
+ return nil, fmt.Errorf("root package %v is missing", root)
+ }
+ }
+
+ if ld.Mode&NeedImports != 0 {
+ // Materialize the import graph.
+
+ const (
+ white = 0 // new
+ grey = 1 // in progress
+ black = 2 // complete
+ )
+
+ // visit traverses the import graph, depth-first,
+ // and materializes the graph as Packages.Imports.
+ //
+		// Valid imports are saved in the Packages.Imports map.
+ // Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+ // Thus, even in the presence of both kinds of errors,
+ // the Import graph remains a DAG.
+ //
+ // visit returns whether the package needs src or has a transitive
+ // dependency on a package that does. These are the only packages
+ // for which we load source code.
+ var stack []*loaderPackage
+ var visit func(lpkg *loaderPackage) bool
+ visit = func(lpkg *loaderPackage) bool {
+ switch lpkg.color {
+ case black:
+ return lpkg.needsrc
+ case grey:
+ panic("internal error: grey node")
+ }
+ lpkg.color = grey
+ stack = append(stack, lpkg) // push
+ stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
+ lpkg.Imports = make(map[string]*Package, len(stubs))
+ for importPath, ipkg := range stubs {
+ var importErr error
+ imp := ld.pkgs[ipkg.ID]
+ if imp == nil {
+ // (includes package "C" when DisableCgo)
+ importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+ } else if imp.color == grey {
+ importErr = fmt.Errorf("import cycle: %s", stack)
+ }
+ if importErr != nil {
+ if lpkg.importErrors == nil {
+ lpkg.importErrors = make(map[string]error)
+ }
+ lpkg.importErrors[importPath] = importErr
+ continue
+ }
+
+ if visit(imp) {
+ lpkg.needsrc = true
+ }
+ lpkg.Imports[importPath] = imp.Package
+ }
+
+ // Complete type information is required for the
+ // immediate dependencies of each source package.
+ if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
+ for _, ipkg := range lpkg.Imports {
+ ld.pkgs[ipkg.ID].needtypes = true
+ }
+ }
+
+			// NeedTypesSizes causes TypesSizes to be set even
+			// on packages for which types aren't needed.
+ if ld.Mode&NeedTypesSizes != 0 {
+ lpkg.TypesSizes = ld.sizes
+ }
+ stack = stack[:len(stack)-1] // pop
+ lpkg.color = black
+
+ return lpkg.needsrc
+ }
+
+ // For each initial package, create its import DAG.
+ for _, lpkg := range initial {
+ visit(lpkg)
+ }
+
+ } else {
+ // !NeedImports: drop the stub (ID-only) import packages
+ // that we are not even going to try to resolve.
+ for _, lpkg := range initial {
+ lpkg.Imports = nil
+ }
+ }
+
+ // Load type data and syntax if needed, starting at
+ // the initial packages (roots of the import DAG).
+ if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
+ var wg sync.WaitGroup
+ for _, lpkg := range initial {
+ wg.Add(1)
+ go func(lpkg *loaderPackage) {
+ ld.loadRecursive(lpkg)
+ wg.Done()
+ }(lpkg)
+ }
+ wg.Wait()
+ }
+
+ result := make([]*Package, len(initial))
+ for i, lpkg := range initial {
+ result[i] = lpkg.Package
+ }
+ for i := range ld.pkgs {
+ // Clear all unrequested fields,
+ // to catch programs that use more than they request.
+ if ld.requestedMode&NeedName == 0 {
+ ld.pkgs[i].Name = ""
+ ld.pkgs[i].PkgPath = ""
+ }
+ if ld.requestedMode&NeedFiles == 0 {
+ ld.pkgs[i].GoFiles = nil
+ ld.pkgs[i].OtherFiles = nil
+ ld.pkgs[i].IgnoredFiles = nil
+ }
+ if ld.requestedMode&NeedEmbedFiles == 0 {
+ ld.pkgs[i].EmbedFiles = nil
+ }
+ if ld.requestedMode&NeedEmbedPatterns == 0 {
+ ld.pkgs[i].EmbedPatterns = nil
+ }
+ if ld.requestedMode&NeedCompiledGoFiles == 0 {
+ ld.pkgs[i].CompiledGoFiles = nil
+ }
+ if ld.requestedMode&NeedImports == 0 {
+ ld.pkgs[i].Imports = nil
+ }
+ if ld.requestedMode&NeedExportFile == 0 {
+ ld.pkgs[i].ExportFile = ""
+ }
+ if ld.requestedMode&NeedTypes == 0 {
+ ld.pkgs[i].Types = nil
+ ld.pkgs[i].Fset = nil
+ ld.pkgs[i].IllTyped = false
+ }
+ if ld.requestedMode&NeedSyntax == 0 {
+ ld.pkgs[i].Syntax = nil
+ }
+ if ld.requestedMode&NeedTypesInfo == 0 {
+ ld.pkgs[i].TypesInfo = nil
+ }
+ if ld.requestedMode&NeedTypesSizes == 0 {
+ ld.pkgs[i].TypesSizes = nil
+ }
+ if ld.requestedMode&NeedModule == 0 {
+ ld.pkgs[i].Module = nil
+ }
+ }
+
+ return result, nil
+}
+
+// loadRecursive loads the specified package and its dependencies,
+// recursively, in parallel, in topological order.
+// It is atomic and idempotent.
+// Precondition: ld.Mode&NeedTypes.
+func (ld *loader) loadRecursive(lpkg *loaderPackage) {
+ lpkg.loadOnce.Do(func() {
+ // Load the direct dependencies, in parallel.
+ var wg sync.WaitGroup
+ for _, ipkg := range lpkg.Imports {
+ imp := ld.pkgs[ipkg.ID]
+ wg.Add(1)
+ go func(imp *loaderPackage) {
+ ld.loadRecursive(imp)
+ wg.Done()
+ }(imp)
+ }
+ wg.Wait()
+ ld.loadPackage(lpkg)
+ })
+}
+
+// loadPackage loads the specified package.
+// It must be called only once per Package,
+// after immediate dependencies are loaded.
+// Precondition: ld.Mode & NeedTypes.
+func (ld *loader) loadPackage(lpkg *loaderPackage) {
+ if lpkg.PkgPath == "unsafe" {
+ // Fill in the blanks to avoid surprises.
+ lpkg.Types = types.Unsafe
+ lpkg.Fset = ld.Fset
+ lpkg.Syntax = []*ast.File{}
+ lpkg.TypesInfo = new(types.Info)
+ lpkg.TypesSizes = ld.sizes
+ return
+ }
+
+ // Call NewPackage directly with explicit name.
+ // This avoids skew between golist and go/types when the files'
+ // package declarations are inconsistent.
+ lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
+ lpkg.Fset = ld.Fset
+
+ // Subtle: we populate all Types fields with an empty Package
+ // before loading export data so that export data processing
+ // never has to create a types.Package for an indirect dependency,
+ // which would then require that such created packages be explicitly
+ // inserted back into the Import graph as a final step after export data loading.
+ // (Hence this return is after the Types assignment.)
+ // The Diamond test exercises this case.
+ if !lpkg.needtypes && !lpkg.needsrc {
+ return
+ }
+ if !lpkg.needsrc {
+ if err := ld.loadFromExportData(lpkg); err != nil {
+ lpkg.Errors = append(lpkg.Errors, Error{
+ Pos: "-",
+ Msg: err.Error(),
+ Kind: UnknownError, // e.g. can't find/open/parse export data
+ })
+ }
+ return // not a source package, don't get syntax trees
+ }
+
+ appendError := func(err error) {
+ // Convert various error types into the one true Error.
+ var errs []Error
+ switch err := err.(type) {
+ case Error:
+ // from driver
+ errs = append(errs, err)
+
+ case *os.PathError:
+ // from parser
+ errs = append(errs, Error{
+ Pos: err.Path + ":1",
+ Msg: err.Err.Error(),
+ Kind: ParseError,
+ })
+
+ case scanner.ErrorList:
+ // from parser
+ for _, err := range err {
+ errs = append(errs, Error{
+ Pos: err.Pos.String(),
+ Msg: err.Msg,
+ Kind: ParseError,
+ })
+ }
+
+ case types.Error:
+ // from type checker
+ lpkg.TypeErrors = append(lpkg.TypeErrors, err)
+ errs = append(errs, Error{
+ Pos: err.Fset.Position(err.Pos).String(),
+ Msg: err.Msg,
+ Kind: TypeError,
+ })
+
+ default:
+ // unexpected impoverished error from parser?
+ errs = append(errs, Error{
+ Pos: "-",
+ Msg: err.Error(),
+ Kind: UnknownError,
+ })
+
+ // If you see this error message, please file a bug.
+ log.Printf("internal error: error %q (%T) without position", err, err)
+ }
+
+ lpkg.Errors = append(lpkg.Errors, errs...)
+ }
+
+ // If the go command on the PATH is newer than the runtime,
+ // then the go/{scanner,ast,parser,types} packages from the
+ // standard library may be unable to process the files
+ // selected by go list.
+ //
+ // There is currently no way to downgrade the effective
+ // version of the go command (see issue 52078), so we proceed
+ // with the newer go command but, in case of parse or type
+ // errors, we emit an additional diagnostic.
+ //
+ // See:
+ // - golang.org/issue/52078 (flag to set release tags)
+ // - golang.org/issue/50825 (gopls legacy version support)
+ // - golang.org/issue/55883 (go/packages confusing error)
+ //
+ // Should we assert a hard minimum of (currently) go1.16 here?
+ var runtimeVersion int
+ if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion {
+ defer func() {
+ if len(lpkg.Errors) > 0 {
+ appendError(Error{
+ Pos: "-",
+ Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion),
+ Kind: UnknownError,
+ })
+ }
+ }()
+ }
+
+ if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
+ // The config requested loading sources and types, but sources are missing.
+ // Add an error to the package and fall back to loading from export data.
+ appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
+ _ = ld.loadFromExportData(lpkg) // ignore any secondary errors
+
+ return // can't get syntax trees for this package
+ }
+
+ files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
+ for _, err := range errs {
+ appendError(err)
+ }
+
+ lpkg.Syntax = files
+ if ld.Config.Mode&NeedTypes == 0 {
+ return
+ }
+
+ lpkg.TypesInfo = &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Instances: make(map[*ast.Ident]types.Instance),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ }
+ versions.InitFileVersions(lpkg.TypesInfo)
+ lpkg.TypesSizes = ld.sizes
+
+ importer := importerFunc(func(path string) (*types.Package, error) {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ // The imports map is keyed by import path.
+ ipkg := lpkg.Imports[path]
+ if ipkg == nil {
+ if err := lpkg.importErrors[path]; err != nil {
+ return nil, err
+ }
+ // There was skew between the metadata and the
+ // import declarations, likely due to an edit
+ // race, or because the ParseFile feature was
+ // used to supply alternative file contents.
+ return nil, fmt.Errorf("no metadata for %s", path)
+ }
+
+ if ipkg.Types != nil && ipkg.Types.Complete() {
+ return ipkg.Types, nil
+ }
+ log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
+ panic("unreachable")
+ })
+
+ // type-check
+ tc := &types.Config{
+ Importer: importer,
+
+ // Type-check bodies of functions only in initial packages.
+ // Example: for import graph A->B->C and initial packages {A,C},
+ // we can ignore function bodies in B.
+ IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
+
+ Error: appendError,
+ Sizes: ld.sizes, // may be nil
+ }
+ if lpkg.Module != nil && lpkg.Module.GoVersion != "" {
+ typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion)
+ }
+ if (ld.Mode & typecheckCgo) != 0 {
+ if !typesinternal.SetUsesCgo(tc) {
+ appendError(Error{
+ Msg: "typecheckCgo requires Go 1.15+",
+ Kind: ListError,
+ })
+ return
+ }
+ }
+ types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+
+ lpkg.importErrors = nil // no longer needed
+
+ // If !Cgo, the type-checker uses FakeImportC mode, so
+ // it doesn't invoke the importer for import "C",
+ // nor report an error for the import,
+ // or for any undefined C.f reference.
+ // We must detect this explicitly and correctly
+ // mark the package as IllTyped (by reporting an error).
+ // TODO(adonovan): if these errors are annoying,
+ // we could just set IllTyped quietly.
+ if tc.FakeImportC {
+ outer:
+ for _, f := range lpkg.Syntax {
+ for _, imp := range f.Imports {
+ if imp.Path.Value == `"C"` {
+ err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+ appendError(err)
+ break outer
+ }
+ }
+ }
+ }
+
+ // Record accumulated errors.
+ illTyped := len(lpkg.Errors) > 0
+ if !illTyped {
+ for _, imp := range lpkg.Imports {
+ if imp.IllTyped {
+ illTyped = true
+ break
+ }
+ }
+ }
+ lpkg.IllTyped = illTyped
+}
+
+// An importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 20)
+
+func (ld *loader) parseFile(filename string) (*ast.File, error) {
+ ld.parseCacheMu.Lock()
+ v, ok := ld.parseCache[filename]
+ if ok {
+ // cache hit
+ ld.parseCacheMu.Unlock()
+ <-v.ready
+ } else {
+ // cache miss
+ v = &parseValue{ready: make(chan struct{})}
+ ld.parseCache[filename] = v
+ ld.parseCacheMu.Unlock()
+
+ var src []byte
+ for f, contents := range ld.Config.Overlay {
+ if sameFile(f, filename) {
+ src = contents
+ }
+ }
+ var err error
+ if src == nil {
+ ioLimit <- true // wait
+ src, err = os.ReadFile(filename)
+ <-ioLimit // signal
+ }
+ if err != nil {
+ v.err = err
+ } else {
+ v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+ }
+
+ close(v.ready)
+ }
+ return v.f, v.err
+}
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+ var wg sync.WaitGroup
+ n := len(filenames)
+ parsed := make([]*ast.File, n)
+ errors := make([]error, n)
+ for i, file := range filenames {
+ if ld.Config.Context.Err() != nil {
+ parsed[i] = nil
+ errors[i] = ld.Config.Context.Err()
+ continue
+ }
+ wg.Add(1)
+ go func(i int, filename string) {
+ parsed[i], errors[i] = ld.parseFile(filename)
+ wg.Done()
+ }(i, file)
+ }
+ wg.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+func sameFile(x, y string) bool {
+ if x == y {
+ // It could be the case that y doesn't exist.
+ // For instance, it may be an overlay file that
+ // hasn't been written to disk. To handle that case
+ // let x == y through. (We added the exact absolute path
+ // string to the CompiledGoFiles list, so the unwritten
+ // overlay case implies x==y.)
+ return true
+ }
+ if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+ if xi, err := os.Stat(x); err == nil {
+ if yi, err := os.Stat(y); err == nil {
+ return os.SameFile(xi, yi)
+ }
+ }
+ }
+ return false
+}
+
+// loadFromExportData ensures that type information is present for the specified
+// package, loading it from an export data file on the first request.
+// On success it sets lpkg.Types to a new Package.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
+ if lpkg.PkgPath == "" {
+ log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
+ }
+
+ // Because gcexportdata.Read has the potential to create or
+ // modify the types.Package for each node in the transitive
+ // closure of dependencies of lpkg, all exportdata operations
+ // must be sequential. (Finer-grained locking would require
+ // changes to the gcexportdata API.)
+ //
+ // The exportMu lock guards the lpkg.Types field and the
+ // types.Package it points to, for each loaderPackage in the graph.
+ //
+ // Not all accesses to Package.Pkg need to be protected by exportMu:
+ // graph ordering ensures that direct dependencies of source
+ // packages are fully loaded before the importer reads their Pkg field.
+ ld.exportMu.Lock()
+ defer ld.exportMu.Unlock()
+
+ if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
+ return nil // cache hit
+ }
+
+ lpkg.IllTyped = true // fail safe
+
+ if lpkg.ExportFile == "" {
+ // Errors while building export data will have been printed to stderr.
+ return fmt.Errorf("no export data file")
+ }
+ f, err := os.Open(lpkg.ExportFile)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Read gc export data.
+ //
+ // We don't currently support gccgo export data because all
+ // underlying workspaces use the gc toolchain. (Even build
+ // systems that support gccgo don't use it for workspace
+ // queries.)
+ r, err := gcexportdata.NewReader(f)
+ if err != nil {
+ return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+
+ // Build the view.
+ //
+ // The gcexportdata machinery has no concept of package ID.
+ // It identifies packages by their PkgPath, which although not
+ // globally unique is unique within the scope of one invocation
+ // of the linker, type-checker, or gcexportdata.
+ //
+ // So, we must build a PkgPath-keyed view of the global
+ // (conceptually ID-keyed) cache of packages and pass it to
+ // gcexportdata. The view must contain every existing
+ // package that might possibly be mentioned by the
+ // current package---its transitive closure.
+ //
+ // In loadPackage, we unconditionally create a types.Package for
+ // each dependency so that export data loading does not
+ // create new ones.
+ //
+ // TODO(adonovan): it would be simpler and more efficient
+ // if the export data machinery invoked a callback to
+ // get-or-create a package instead of a map.
+ //
+ view := make(map[string]*types.Package) // view seen by gcexportdata
+ seen := make(map[*loaderPackage]bool) // all visited packages
+ var visit func(pkgs map[string]*Package)
+ visit = func(pkgs map[string]*Package) {
+ for _, p := range pkgs {
+ lpkg := ld.pkgs[p.ID]
+ if !seen[lpkg] {
+ seen[lpkg] = true
+ view[lpkg.PkgPath] = lpkg.Types
+ visit(lpkg.Imports)
+ }
+ }
+ }
+ visit(lpkg.Imports)
+
+ viewLen := len(view) + 1 // adding the self package
+ // Parse the export data.
+ // (May modify incomplete packages in view but not create new ones.)
+ tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
+ if err != nil {
+ return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+ if _, ok := view["go.shape"]; ok {
+ // Account for the pseudopackage "go.shape" that gets
+ // created by generic code.
+ viewLen++
+ }
+ if viewLen != len(view) {
+ log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath)
+ }
+
+ lpkg.Types = tpkg
+ lpkg.IllTyped = false
+ return nil
+}
+
+// impliedLoadMode returns loadMode with its dependencies.
+func impliedLoadMode(loadMode LoadMode) LoadMode {
+ if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 {
+ // All these things require knowing the import graph.
+ loadMode |= NeedImports
+ }
+
+ return loadMode
+}
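+
+// For example (illustrative only): requesting type information implies that
+// the import graph must be materialized as well, so
+//
+//	impliedLoadMode(NeedTypesInfo) == NeedTypesInfo|NeedImports
+//	impliedLoadMode(NeedName)      == NeedName // nothing extra implied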
+
+func usesExportData(cfg *Config) bool {
+ return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
+}
+
+var _ interface{} = io.Discard // assert build toolchain is go1.16 or later
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
new file mode 100644
index 0000000000..a1dcc40b72
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "fmt"
+ "os"
+ "sort"
+)
+
+// Visit visits all the packages in the import graph whose roots are
+// pkgs, calling the optional pre function the first time each package
+// is encountered (preorder), and the optional post function after a
+// package's dependencies have been visited (postorder).
+// The boolean result of pre(pkg) determines whether
+// the imports of package pkg are visited.
+func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
+ seen := make(map[*Package]bool)
+ var visit func(*Package)
+ visit = func(pkg *Package) {
+ if !seen[pkg] {
+ seen[pkg] = true
+
+ if pre == nil || pre(pkg) {
+ paths := make([]string, 0, len(pkg.Imports))
+ for path := range pkg.Imports {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths) // Imports is a map, this makes visit stable
+ for _, path := range paths {
+ visit(pkg.Imports[path])
+ }
+ }
+
+ if post != nil {
+ post(pkg)
+ }
+ }
+ }
+ for _, pkg := range pkgs {
+ visit(pkg)
+ }
+}
+
+// PrintErrors prints to os.Stderr the accumulated errors of all
+// packages in the import graph rooted at pkgs, dependencies first.
+// PrintErrors returns the number of errors printed.
+func PrintErrors(pkgs []*Package) int {
+ var n int
+ Visit(pkgs, nil, func(pkg *Package) {
+ for _, err := range pkg.Errors {
+ fmt.Fprintln(os.Stderr, err)
+ n++
+ }
+ })
+ return n
+}
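+
+// A hedged usage sketch (pkgs is assumed to come from a prior Load call):
+// collect every package in dependency-first order while pruning the subgraphs
+// below packages that have errors.
+//
+//	var order []*Package
+//	Visit(pkgs,
+//		func(p *Package) bool { return len(p.Errors) == 0 }, // pre: prune on error
+//		func(p *Package) { order = append(order, p) })       // post: dependencies first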
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
new file mode 100644
index 0000000000..11d5c8c3ad
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -0,0 +1,752 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package objectpath defines a naming scheme for types.Objects
+// (that is, named entities in Go programs) relative to their enclosing
+// package.
+//
+// Type-checker objects are canonical, so they are usually identified by
+// their address in memory (a pointer), but a pointer has meaning only
+// within one address space. By contrast, objectpath names allow the
+// identity of an object to be sent from one program to another,
+// establishing a correspondence between types.Object variables that are
+// distinct but logically equivalent.
+//
+// A single object may have multiple paths. In this example,
+//
+// type A struct{ X int }
+// type B A
+//
+// the field X has two paths due to its membership of both A and B.
+// The For(obj) function always returns one of these paths, arbitrarily
+// but consistently.
+package objectpath
+
+import (
+ "fmt"
+ "go/types"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// A Path is an opaque name that identifies a types.Object
+// relative to its package. Conceptually, the name consists of a
+// sequence of destructuring operations applied to the package scope
+// to obtain the original object.
+// The name does not include the package itself.
+type Path string
+
+// Encoding
+//
+// An object path is a textual and (with training) human-readable encoding
+// of a sequence of destructuring operators, starting from a types.Package.
+// The sequences represent a path through the package/object/type graph.
+// We classify these operators by their type:
+//
+// PO package->object Package.Scope.Lookup
+// OT object->type Object.Type
+//	TT	type->type	Type.{Elem,Key,Params,Results,Underlying,TypeParams,Constraint} [EKPRUTC]
+// TO type->object Type.{At,Field,Method,Obj} [AFMO]
+//
+// All valid paths start with a package and end at an object
+// and thus may be defined by the regular language:
+//
+// objectpath = PO (OT TT* TO)*
+//
+// The concrete encoding follows directly:
+// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
+// - The only OT operator is Object.Type,
+// which we encode as '.' because dot cannot appear in an identifier.
+// - The TT operators are encoded as [EKPRUTC];
+// one of these (TypeParam) requires an integer operand,
+// which is encoded as a string of decimal digits.
+// - The TO operators are encoded as [AFMO];
+// three of these (At,Field,Method) require an integer operand,
+// which is encoded as a string of decimal digits.
+// These indices are stable across different representations
+// of the same package, even source and export data.
+// The indices used are implementation specific and may not correspond to
+// the argument to the go/types function.
+//
+// In the example below,
+//
+// package p
+//
+// type T interface {
+// f() (a string, b struct{ X int })
+// }
+//
+// field X has the path "T.UM0.RA1.F0",
+// representing the following sequence of operations:
+//
+// p.Lookup("T") T
+// .Type().Underlying().Method(0). f
+// .Type().Results().At(1) b
+// .Type().Field(0) X
+//
+// The encoding is not maximally compact---every R or P is
+// followed by an A, for example---but this simplifies the
+// encoder and decoder.
+const (
+ // object->type operators
+ opType = '.' // .Type() (Object)
+
+ // type->type operators
+ opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
+ opKey = 'K' // .Key() (Map)
+ opParams = 'P' // .Params() (Signature)
+ opResults = 'R' // .Results() (Signature)
+ opUnderlying = 'U' // .Underlying() (Named)
+ opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
+ opConstraint = 'C' // .Constraint() (TypeParam)
+
+ // type->object operators
+ opAt = 'A' // .At(i) (Tuple)
+ opField = 'F' // .Field(i) (Struct)
+ opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
+ opObj = 'O' // .Obj() (Named, TypeParam)
+)
+
+// For is equivalent to new(Encoder).For(obj).
+//
+// It may be more efficient to reuse a single Encoder across several calls.
+func For(obj types.Object) (Path, error) {
+ return new(Encoder).For(obj)
+}
+
+// An Encoder amortizes the cost of encoding the paths of multiple objects.
+// The zero value of an Encoder is ready to use.
+type Encoder struct {
+ scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects
+}
+
+// For returns the path to an object relative to its package,
+// or an error if the object is not accessible from the package's Scope.
+//
+// The For function guarantees to return a path only for the following objects:
+// - package-level types
+// - exported package-level non-types
+// - methods
+// - parameter and result variables
+// - struct fields
+// These objects are sufficient to define the API of their package.
+// The objects described by a package's export data are drawn from this set.
+//
+// The set of objects accessible from a package's Scope depends on
+// whether the package was produced by type-checking syntax, or
+// reading export data; the latter may have a smaller Scope since
+// export data trims objects that are not reachable from an exported
+// declaration. For example, the For function will return a path for
+// an exported method of an unexported type that is not reachable
+// from any public declaration; this path will cause the Object
+// function to fail if called on a package loaded from export data.
+// TODO(adonovan): is this a bug or feature? Should this package
+// compute accessibility in the same way?
+//
+// For does not return a path for predeclared names, imported package
+// names, local names, and unexported package-level names (except
+// types).
+//
+// Example: given this definition,
+//
+// package p
+//
+// type T interface {
+// f() (a string, b struct{ X int })
+// }
+//
+// For(X) would return a path that denotes the following sequence of operations:
+//
+// p.Scope().Lookup("T") (TypeName T)
+// .Type().Underlying().Method(0). (method Func f)
+// .Type().Results().At(1) (field Var b)
+// .Type().Field(0) (field Var X)
+//
+// where p is the package (*types.Package) to which X belongs.
+func (enc *Encoder) For(obj types.Object) (Path, error) {
+ pkg := obj.Pkg()
+
+ // This table lists the cases of interest.
+ //
+ // Object Action
+ // ------ ------
+ // nil reject
+ // builtin reject
+ // pkgname reject
+ // label reject
+ // var
+ // package-level accept
+ // func param/result accept
+ // local reject
+ // struct field accept
+ // const
+ // package-level accept
+ // local reject
+ // func
+ // package-level accept
+ // init functions reject
+ // concrete method accept
+ // interface method accept
+ // type
+ // package-level accept
+ // local reject
+ //
+ // The only accessible package-level objects are members of pkg itself.
+ //
+ // The cases are handled in four steps:
+ //
+ // 1. reject nil and builtin
+ // 2. accept package-level objects
+ // 3. reject obviously invalid objects
+ // 4. search the API for the path to the param/result/field/method.
+
+ // 1. reference to nil or builtin?
+ if pkg == nil {
+ return "", fmt.Errorf("predeclared %s has no path", obj)
+ }
+ scope := pkg.Scope()
+
+ // 2. package-level object?
+ if scope.Lookup(obj.Name()) == obj {
+ // Only exported objects (and non-exported types) have a path.
+ // Non-exported types may be referenced by other objects.
+ if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
+ return "", fmt.Errorf("no path for non-exported %v", obj)
+ }
+ return Path(obj.Name()), nil
+ }
+
+ // 3. Not a package-level object.
+ // Reject obviously non-viable cases.
+ switch obj := obj.(type) {
+ case *types.TypeName:
+ if _, ok := obj.Type().(*types.TypeParam); !ok {
+ // With the exception of type parameters, only package-level type names
+ // have a path.
+ return "", fmt.Errorf("no path for %v", obj)
+ }
+ case *types.Const, // Only package-level constants have a path.
+ *types.Label, // Labels are function-local.
+ *types.PkgName: // PkgNames are file-local.
+ return "", fmt.Errorf("no path for %v", obj)
+
+ case *types.Var:
+ // Could be:
+ // - a field (obj.IsField())
+ // - a func parameter or result
+ // - a local var.
+ // Sadly there is no way to distinguish
+ // a param/result from a local
+ // so we must proceed to the find.
+
+ case *types.Func:
+ // A func, if not package-level, must be a method.
+ if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
+ return "", fmt.Errorf("func is not a method: %v", obj)
+ }
+
+ if path, ok := enc.concreteMethod(obj); ok {
+ // Fast path for concrete methods that avoids looping over scope.
+ return path, nil
+ }
+
+ default:
+ panic(obj)
+ }
+
+ // 4. Search the API for the path to the var (field/param/result) or method.
+
+ // First inspect package-level named types.
+ // In the presence of path aliases, these give
+ // the best paths because non-types may
+ // refer to types, but not the reverse.
+ empty := make([]byte, 0, 48) // initial space
+ objs := enc.scopeObjects(scope)
+ for _, o := range objs {
+ tname, ok := o.(*types.TypeName)
+ if !ok {
+ continue // handle non-types in second pass
+ }
+
+ path := append(empty, o.Name()...)
+ path = append(path, opType)
+
+ T := o.Type()
+
+ if tname.IsAlias() {
+ // type alias
+ if r := find(obj, T, path, nil); r != nil {
+ return Path(r), nil
+ }
+ } else {
+ if named, _ := T.(*types.Named); named != nil {
+ if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil {
+ // generic named type
+ return Path(r), nil
+ }
+ }
+ // defined (named) type
+ if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
+ return Path(r), nil
+ }
+ }
+ }
+
+ // Then inspect everything else:
+ // non-types, and declared methods of defined types.
+ for _, o := range objs {
+ path := append(empty, o.Name()...)
+ if _, ok := o.(*types.TypeName); !ok {
+ if o.Exported() {
+ // exported non-type (const, var, func)
+ if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
+ return Path(r), nil
+ }
+ }
+ continue
+ }
+
+ // Inspect declared methods of defined types.
+ if T, ok := o.Type().(*types.Named); ok {
+ path = append(path, opType)
+ // The method index here is always with respect
+ // to the underlying go/types data structures,
+ // which ultimately derives from source order
+ // and must be preserved by export data.
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ path2 := appendOpArg(path, opMethod, i)
+ if m == obj {
+ return Path(path2), nil // found declared method
+ }
+ if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
+ return Path(r), nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
+}
+
+func appendOpArg(path []byte, op byte, arg int) []byte {
+ path = append(path, op)
+ path = strconv.AppendInt(path, int64(arg), 10)
+ return path
+}
+
+// concreteMethod returns the path for meth, which must have a non-nil receiver.
+// The second return value indicates success and may be false if the method is
+// an interface method or if it is an instantiated method.
+//
+// This function is just an optimization that avoids the general scope walking
+// approach. You are expected to fall back to the general approach if this
+// function fails.
+func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
+ // Concrete methods can only be declared on package-scoped named types. For
+ // that reason we can skip the expensive walk over the package scope: the
+ // path will always be package -> named type -> method. We can trivially get
+ // the type name from the receiver, and only have to look over the type's
+ // methods to find the method index.
+ //
+ // Methods on generic types require special consideration, however. Consider
+ // the following package:
+ //
+ // L1: type S[T any] struct{}
+ // L2: func (recv S[A]) Foo() { recv.Bar() }
+ // L3: func (recv S[B]) Bar() { }
+ // L4: type Alias = S[int]
+ // L5: func _[T any]() { var s S[int]; s.Foo() }
+ //
+ // The receivers of methods on generic types are instantiations. L2 and L3
+ // instantiate S with the type-parameters A and B, which are scoped to the
+ // respective methods. L4 and L5 each instantiate S with int. Each of these
+ // instantiations has its own method set, full of methods (and thus objects)
+ // with receivers whose types are the respective instantiations. In other
+ // words, we have
+ //
+ // S[A].Foo, S[A].Bar
+ // S[B].Foo, S[B].Bar
+ // S[int].Foo, S[int].Bar
+ //
+ // We may thus be trying to produce object paths for any of these objects.
+ //
+ // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
+ // and S.Bar, which are the paths that this function naturally produces.
+ //
+ // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
+ // don't correspond to the origin methods. For S[int], this is significant.
+ // The most precise object path for S[int].Foo, for example, is Alias.Foo,
+ // not S.Foo. Our function, however, would produce S.Foo, which would
+ // resolve to a different object.
+ //
+ // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
+ // still the correct paths, since only the origin methods have meaningful
+ // paths. But this is likely only true for trivial cases and has edge cases.
+ // Since this function is only an optimization, we err on the side of giving
+ // up, deferring to the slower but definitely correct algorithm. Most users
+ // of objectpath will only be giving us origin methods, anyway, as referring
+ // to instantiated methods is usually not useful.
+
+ if typeparams.OriginMethod(meth) != meth {
+ return "", false
+ }
+
+ recvT := meth.Type().(*types.Signature).Recv().Type()
+ if ptr, ok := recvT.(*types.Pointer); ok {
+ recvT = ptr.Elem()
+ }
+
+ named, ok := recvT.(*types.Named)
+ if !ok {
+ return "", false
+ }
+
+ if types.IsInterface(named) {
+ // Named interfaces don't have to be package-scoped
+ //
+ // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
+ // methods, too, I think.
+ return "", false
+ }
+
+ // Preallocate space for the name, opType, opMethod, and some digits.
+ name := named.Obj().Name()
+ path := make([]byte, 0, len(name)+8)
+ path = append(path, name...)
+ path = append(path, opType)
+
+ // Method indices are w.r.t. the go/types data structures,
+ // ultimately deriving from source order,
+ // which is preserved by export data.
+ for i := 0; i < named.NumMethods(); i++ {
+ if named.Method(i) == meth {
+ path = appendOpArg(path, opMethod, i)
+ return Path(path), true
+ }
+ }
+
+ // Due to golang/go#59944, go/types fails to associate the receiver with
+ // certain methods on cgo types.
+ //
+ // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go
+ // versions gopls supports.
+ return "", false
+ // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named)))
+}
+
+// find finds obj within type T, returning the path to it, or nil if not found.
+//
+// The seen map is used to short circuit cycles through type parameters. If
+// nil, it will be allocated as necessary.
+func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
+ switch T := T.(type) {
+ case *types.Basic, *types.Named:
+ // Named types belonging to pkg were handled already,
+ // so T must belong to another package. No path.
+ return nil
+ case *types.Pointer:
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Slice:
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Array:
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Chan:
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Map:
+ if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
+ return r
+ }
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Signature:
+ if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil {
+ return r
+ }
+ if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
+ return r
+ }
+ return find(obj, T.Results(), append(path, opResults), seen)
+ case *types.Struct:
+ for i := 0; i < T.NumFields(); i++ {
+ fld := T.Field(i)
+ path2 := appendOpArg(path, opField, i)
+ if fld == obj {
+ return path2 // found field var
+ }
+ if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.Tuple:
+ for i := 0; i < T.Len(); i++ {
+ v := T.At(i)
+ path2 := appendOpArg(path, opAt, i)
+ if v == obj {
+ return path2 // found param/result var
+ }
+ if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.Interface:
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ path2 := appendOpArg(path, opMethod, i)
+ if m == obj {
+ return path2 // found interface method
+ }
+ if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.TypeParam:
+ name := T.Obj()
+ if name == obj {
+ return append(path, opObj)
+ }
+ if seen[name] {
+ return nil
+ }
+ if seen == nil {
+ seen = make(map[*types.TypeName]bool)
+ }
+ seen[name] = true
+ if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
+ return r
+ }
+ return nil
+ }
+ panic(T)
+}
+
+func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ path2 := appendOpArg(path, opTypeParam, i)
+ if r := find(obj, tparam, path2, seen); r != nil {
+ return r
+ }
+ }
+ return nil
+}
+
+// Object returns the object denoted by path p within the package pkg.
+func Object(pkg *types.Package, p Path) (types.Object, error) {
+ pathstr := string(p)
+ if pathstr == "" {
+ return nil, fmt.Errorf("empty path")
+ }
+
+ var pkgobj, suffix string
+ if dot := strings.IndexByte(pathstr, opType); dot < 0 {
+ pkgobj = pathstr
+ } else {
+ pkgobj = pathstr[:dot]
+ suffix = pathstr[dot:] // suffix starts with "."
+ }
+
+ obj := pkg.Scope().Lookup(pkgobj)
+ if obj == nil {
+ return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
+ }
+
+ // abstraction of *types.{Pointer,Slice,Array,Chan,Map}
+ type hasElem interface {
+ Elem() types.Type
+ }
+ // abstraction of *types.{Named,Signature}
+ type hasTypeParams interface {
+ TypeParams() *types.TypeParamList
+ }
+ // abstraction of *types.{Named,TypeParam}
+ type hasObj interface {
+ Obj() *types.TypeName
+ }
+
+ // The loop state is the pair (t, obj),
+ // exactly one of which is non-nil, initially obj.
+ // All suffixes start with '.' (the only object->type operation),
+ // followed by optional type->type operations,
+ // then a type->object operation.
+ // The cycle then repeats.
+ var t types.Type
+ for suffix != "" {
+ code := suffix[0]
+ suffix = suffix[1:]
+
+		// Codes [AFMT] have an integer operand.
+ var index int
+ switch code {
+ case opAt, opField, opMethod, opTypeParam:
+ rest := strings.TrimLeft(suffix, "0123456789")
+ numerals := suffix[:len(suffix)-len(rest)]
+ suffix = rest
+ i, err := strconv.Atoi(numerals)
+ if err != nil {
+ return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
+ }
+ index = int(i)
+ case opObj:
+ // no operand
+ default:
+ // The suffix must end with a type->object operation.
+ if suffix == "" {
+ return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
+ }
+ }
+
+ if code == opType {
+ if t != nil {
+ return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
+ }
+ t = obj.Type()
+ obj = nil
+ continue
+ }
+
+ if t == nil {
+ return nil, fmt.Errorf("invalid path: code %q in object context", code)
+ }
+
+ // Inv: t != nil, obj == nil
+
+ switch code {
+ case opElem:
+ hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
+ }
+ t = hasElem.Elem()
+
+ case opKey:
+ mapType, ok := t.(*types.Map)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
+ }
+ t = mapType.Key()
+
+ case opParams:
+ sig, ok := t.(*types.Signature)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ t = sig.Params()
+
+ case opResults:
+ sig, ok := t.(*types.Signature)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ t = sig.Results()
+
+ case opUnderlying:
+ named, ok := t.(*types.Named)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
+ }
+ t = named.Underlying()
+
+ case opTypeParam:
+ hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
+ }
+ tparams := hasTypeParams.TypeParams()
+ if n := tparams.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ t = tparams.At(index)
+
+ case opConstraint:
+ tparam, ok := t.(*types.TypeParam)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
+ }
+ t = tparam.Constraint()
+
+ case opAt:
+ tuple, ok := t.(*types.Tuple)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
+ }
+ if n := tuple.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ obj = tuple.At(index)
+ t = nil
+
+ case opField:
+ structType, ok := t.(*types.Struct)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
+ }
+ if n := structType.NumFields(); index >= n {
+ return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
+ }
+ obj = structType.Field(index)
+ t = nil
+
+ case opMethod:
+ switch t := t.(type) {
+ case *types.Interface:
+ if index >= t.NumMethods() {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+ }
+ obj = t.Method(index) // Id-ordered
+
+ case *types.Named:
+ if index >= t.NumMethods() {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+ }
+ obj = t.Method(index)
+
+ default:
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
+ }
+ t = nil
+
+ case opObj:
+ hasObj, ok := t.(hasObj)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
+ }
+ obj = hasObj.Obj()
+ t = nil
+
+ default:
+ return nil, fmt.Errorf("invalid path: unknown code %q", code)
+ }
+ }
+
+ if obj.Pkg() != pkg {
+ return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
+ }
+
+ return obj, nil // success
+}
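+
+// A round-trip sketch under assumed inputs (pkg is a *types.Package and obj a
+// types.Object belonging to it, both obtained elsewhere): a path computed by
+// For can be resolved back to an equivalent object in another process that
+// has loaded the same package, e.g. from export data.
+//
+//	path, err := For(obj) // e.g. "T.UM0.RA1.F0"
+//	if err == nil {
+//		obj2, _ := Object(pkg, path)
+//		_ = obj2 // logically the same object as obj
+//	}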
+
+// scopeObjects is a memoization of scope objects.
+// Callers must not modify the result.
+func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object {
+ m := enc.scopeMemo
+ if m == nil {
+ m = make(map[*types.Scope][]types.Object)
+ enc.scopeMemo = m
+ }
+ objs, ok := m[scope]
+ if !ok {
+ names := scope.Names() // allocates and sorts
+ objs = make([]types.Object, len(names))
+ for i, name := range names {
+ objs[i] = scope.Lookup(name)
+ }
+ m[scope] = objs
+ }
+ return objs
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go
new file mode 100644
index 0000000000..a6cf0e64a4
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/event.go
@@ -0,0 +1,85 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package core provides support for event based telemetry.
+package core
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Event holds the information about an event of note that occurred.
+type Event struct {
+ at time.Time
+
+	// As events are often on the stack, storing the first few labels directly
+	// in the event can avoid an allocation entirely for the very common case
+	// of simple events.
+	// The length needs to be large enough to cope with the majority of events
+	// but not so large as to cause undue stack pressure.
+ // A log message with two values will use 3 labels (one for each value and
+ // one for the message itself).
+
+ static [3]label.Label // inline storage for the first few labels
+ dynamic []label.Label // dynamically sized storage for remaining labels
+}
+
+// eventLabelMap implements label.Map for the labels of an Event.
+type eventLabelMap struct {
+ event Event
+}
+
+func (ev Event) At() time.Time { return ev.at }
+
+func (ev Event) Format(f fmt.State, r rune) {
+ if !ev.at.IsZero() {
+ fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
+ }
+ for index := 0; ev.Valid(index); index++ {
+ if l := ev.Label(index); l.Valid() {
+ fmt.Fprintf(f, "\n\t%v", l)
+ }
+ }
+}
+
+func (ev Event) Valid(index int) bool {
+ return index >= 0 && index < len(ev.static)+len(ev.dynamic)
+}
+
+func (ev Event) Label(index int) label.Label {
+ if index < len(ev.static) {
+ return ev.static[index]
+ }
+ return ev.dynamic[index-len(ev.static)]
+}
+
+func (ev Event) Find(key label.Key) label.Label {
+ for _, l := range ev.static {
+ if l.Key() == key {
+ return l
+ }
+ }
+ for _, l := range ev.dynamic {
+ if l.Key() == key {
+ return l
+ }
+ }
+ return label.Label{}
+}
+
+func MakeEvent(static [3]label.Label, labels []label.Label) Event {
+ return Event{
+ static: static,
+ dynamic: labels,
+ }
+}
+
+// CloneEvent returns a copy of the event with the time adjusted to at.
+func CloneEvent(ev Event, at time.Time) Event {
+ ev.at = at
+ return ev
+}
diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go
new file mode 100644
index 0000000000..05f3a9a579
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, Event, label.Map) context.Context
+
+var (
+ exporter unsafe.Pointer
+)
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+ p := unsafe.Pointer(&e)
+ if e == nil {
+ // &e is always valid, and so p is always valid, but for the early abort
+ // of ProcessEvent to be efficient it needs to make the nil check on the
+ // pointer without having to dereference it, so we make the nil function
+ // also a nil pointer
+ p = nil
+ }
+ atomic.StorePointer(&exporter, p)
+}
+
+// deliver is called to deliver an event to the supplied exporter.
+// It will fill in the time.
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
+ // add the current time to the event
+ ev.at = time.Now()
+ // hand the event off to the current exporter
+ return exporter(ctx, ev, ev)
+}
+
+// Export is called to deliver an event to the global exporter if set.
+func Export(ctx context.Context, ev Event) context.Context {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx
+ }
+ return deliver(ctx, *exporterPtr, ev)
+}
+
+// ExportPair is called to deliver a start event to the supplied exporter.
+// It also returns a function that will deliver the end event to the same
+// exporter.
+// It will fill in the time.
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx, func() {}
+ }
+ ctx = deliver(ctx, *exporterPtr, begin)
+ return ctx, func() { deliver(ctx, *exporterPtr, end) }
+}
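+
+// A minimal sketch of installing an exporter (the print destination is an
+// assumption; any fast, synchronous function will do):
+//
+//	SetExporter(func(ctx context.Context, ev Event, lm label.Map) context.Context {
+//		fmt.Printf("%v\n", ev) // Event implements fmt.Formatter
+//		return ctx
+//	})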
diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go
new file mode 100644
index 0000000000..06c1d4615e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/core/fast.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Log1 takes a message and one label and delivers a log event to the exporter.
+// It is a specialized version of Log that is faster and does no allocation.
+func Log1(ctx context.Context, message string, t1 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ }, nil))
+}
+
+// Log2 takes a message and two labels and delivers a log event to the exporter.
+// It is a specialized version of Log that is faster and does no allocation.
+func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Metric1 sends a metric event to the exporter with the supplied label.
+func Metric1(ctx context.Context, t1 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ }, nil))
+}
+
+// Metric2 sends a metric event to the exporter with the supplied labels.
+func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Start1 sends a span start event with the supplied label to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
+
+// Start2 sends a span start event with the supplied labels to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ t2,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
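
A brief sketch of why these fixed-arity helpers exist: when the message and labels fit in the static [3]label.Label array, no label slice has to be allocated at all. The key and function below are hypothetical, assuming the internal packages shown above.

package example

import (
	"context"

	"golang.org/x/tools/internal/event/core"
	"golang.org/x/tools/internal/event/keys"
)

// fileKey is a hypothetical key used only for this illustration.
var fileKey = keys.NewString("file", "path of the file being processed")

func logOpen(ctx context.Context, path string) {
	// The message plus one label fit in the static [3]label.Label array, so no
	// []label.Label slice is allocated, unlike the variadic event.Log.
	core.Log1(ctx, "opened file", fileKey.Of(path))
}
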
diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go
new file mode 100644
index 0000000000..5dc6e6babe
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package event provides a set of packages that cover the main
+// concepts of telemetry in an implementation-agnostic way.
+package event
diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go
new file mode 100644
index 0000000000..4d55e577d1
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/event.go
@@ -0,0 +1,127 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package event
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event/core"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, core.Event, label.Map) context.Context
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+ core.SetExporter(core.Exporter(e))
+}
+
+// Log takes a message and a label list and combines them into a single event
+// before delivering them to the exporter.
+func Log(ctx context.Context, message string, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ }, labels))
+}
+
+// IsLog returns true if the event was built by the Log function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsLog(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Msg
+}
+
+// Error takes a message and a label list and combines them into a single event
+// before delivering them to the exporter. It captures the error in the
+// delivered event.
+func Error(ctx context.Context, message string, err error, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ keys.Err.Of(err),
+ }, labels))
+}
+
+// IsError returns true if the event was built by the Error function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsError(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Msg &&
+ ev.Label(1).Key() == keys.Err
+}
+
+// Metric sends a metric event to the exporter with the supplied labels.
+func Metric(ctx context.Context, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ }, labels))
+}
+
+// IsMetric returns true if the event was built by the Metric function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsMetric(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Metric
+}
+
+// Label sends a label event to the exporter with the supplied labels.
+func Label(ctx context.Context, labels ...label.Label) context.Context {
+ return core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Label.New(),
+ }, labels))
+}
+
+// IsLabel returns true if the event was built by the Label function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsLabel(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Label
+}
+
+// Start sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
+ return core.ExportPair(ctx,
+ core.MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ }, labels),
+ core.MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
+
+// IsStart returns true if the event was built by the Start function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsStart(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Start
+}
+
+// IsEnd returns true if the event was built by the End function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsEnd(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.End
+}
+
+// Detach returns a context without an associated span.
+// This allows the creation of spans that are not children of the current span.
+func Detach(ctx context.Context) context.Context {
+ return core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Detach.New(),
+ }, nil))
+}
+
+// IsDetach returns true if the event was built by the Detach function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsDetach(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Detach
+}
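
As a usage sketch (illustrative only; the function names are hypothetical), a caller starts a span and defers its end function, and an exporter can use the Is* predicates above to classify what it receives:

package example

import (
	"context"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/event/core"
)

func handleRequest(ctx context.Context) {
	// Start emits the span-start event now and returns the function that will
	// emit the matching end event; it should normally be deferred.
	ctx, done := event.Start(ctx, "handleRequest")
	defer done()

	event.Log(ctx, "handling request")
}

// classify shows how an exporter can branch on event semantics.
func classify(ev core.Event) string {
	switch {
	case event.IsStart(ev):
		return "start"
	case event.IsEnd(ev):
		return "end"
	case event.IsLog(ev):
		return "log"
	default:
		return "other"
	}
}
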
diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go
new file mode 100644
index 0000000000..a02206e301
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go
@@ -0,0 +1,564 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Value represents a key for untyped values.
+type Value struct {
+ name string
+ description string
+}
+
+// New creates a new Key for untyped values.
+func New(name, description string) *Value {
+ return &Value{name: name, description: description}
+}
+
+func (k *Value) Name() string { return k.name }
+func (k *Value) Description() string { return k.description }
+
+func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
+ fmt.Fprint(w, k.From(l))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Value) Get(lm label.Map) interface{} {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
+
+// Tag represents a key for tagging labels that have no value.
+// These are used when the existence of the label is the entire information it
+// carries, such as marking events to be of a specific kind, or from a specific
+// package.
+type Tag struct {
+ name string
+ description string
+}
+
+// NewTag creates a new Key for tagging labels.
+func NewTag(name, description string) *Tag {
+ return &Tag{name: name, description: description}
+}
+
+func (k *Tag) Name() string { return k.name }
+func (k *Tag) Description() string { return k.description }
+
+func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {}
+
+// New creates a new Label with this key.
+func (k *Tag) New() label.Label { return label.OfValue(k, nil) }
+
+// Int represents a key for int values.
+type Int struct {
+ name string
+ description string
+}
+
+// NewInt creates a new Key for int values.
+func NewInt(name, description string) *Int {
+ return &Int{name: name, description: description}
+}
+
+func (k *Int) Name() string { return k.name }
+func (k *Int) Description() string { return k.description }
+
+func (k *Int) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int) Get(lm label.Map) int {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int) From(t label.Label) int { return int(t.Unpack64()) }
+
+// Int8 represents a key for int8 values.
+type Int8 struct {
+ name string
+ description string
+}
+
+// NewInt8 creates a new Key for int8 values.
+func NewInt8(name, description string) *Int8 {
+ return &Int8{name: name, description: description}
+}
+
+func (k *Int8) Name() string { return k.name }
+func (k *Int8) Description() string { return k.description }
+
+func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int8) Get(lm label.Map) int8 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) }
+
+// Int16 represents a key for int16 values.
+type Int16 struct {
+ name string
+ description string
+}
+
+// NewInt16 creates a new Key for int16 values.
+func NewInt16(name, description string) *Int16 {
+ return &Int16{name: name, description: description}
+}
+
+func (k *Int16) Name() string { return k.name }
+func (k *Int16) Description() string { return k.description }
+
+func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int16) Get(lm label.Map) int16 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) }
+
+// Int32 represents a key for int32 values.
+type Int32 struct {
+ name string
+ description string
+}
+
+// NewInt32 creates a new Key for int32 values.
+func NewInt32(name, description string) *Int32 {
+ return &Int32{name: name, description: description}
+}
+
+func (k *Int32) Name() string { return k.name }
+func (k *Int32) Description() string { return k.description }
+
+func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int32) Get(lm label.Map) int32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) }
+
+// Int64 represents a key for int64 values.
+type Int64 struct {
+ name string
+ description string
+}
+
+// NewInt64 creates a new Key for int64 values.
+func NewInt64(name, description string) *Int64 {
+ return &Int64{name: name, description: description}
+}
+
+func (k *Int64) Name() string { return k.name }
+func (k *Int64) Description() string { return k.description }
+
+func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, k.From(l), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int64) Get(lm label.Map) int64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) }
+
+// UInt represents a key for uint values.
+type UInt struct {
+ name string
+ description string
+}
+
+// NewUInt creates a new Key for uint values.
+func NewUInt(name, description string) *UInt {
+ return &UInt{name: name, description: description}
+}
+
+func (k *UInt) Name() string { return k.name }
+func (k *UInt) Description() string { return k.description }
+
+func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt) Get(lm label.Map) uint {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) }
+
+// UInt8 represents a key for uint8 values.
+type UInt8 struct {
+ name string
+ description string
+}
+
+// NewUInt8 creates a new Key for uint8 values.
+func NewUInt8(name, description string) *UInt8 {
+ return &UInt8{name: name, description: description}
+}
+
+func (k *UInt8) Name() string { return k.name }
+func (k *UInt8) Description() string { return k.description }
+
+func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt8) Get(lm label.Map) uint8 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) }
+
+// UInt16 represents a key for uint16 values.
+type UInt16 struct {
+ name string
+ description string
+}
+
+// NewUInt16 creates a new Key for uint16 values.
+func NewUInt16(name, description string) *UInt16 {
+ return &UInt16{name: name, description: description}
+}
+
+func (k *UInt16) Name() string { return k.name }
+func (k *UInt16) Description() string { return k.description }
+
+func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt16) Get(lm label.Map) uint16 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
+
+// UInt32 represents a key for uint32 values.
+type UInt32 struct {
+ name string
+ description string
+}
+
+// NewUInt32 creates a new Key for uint32 values.
+func NewUInt32(name, description string) *UInt32 {
+ return &UInt32{name: name, description: description}
+}
+
+func (k *UInt32) Name() string { return k.name }
+func (k *UInt32) Description() string { return k.description }
+
+func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt32) Get(lm label.Map) uint32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
+
+// UInt64 represents a key for uint64 values.
+type UInt64 struct {
+ name string
+ description string
+}
+
+// NewUInt64 creates a new Key for uint64 values.
+func NewUInt64(name, description string) *UInt64 {
+ return &UInt64{name: name, description: description}
+}
+
+func (k *UInt64) Name() string { return k.name }
+func (k *UInt64) Description() string { return k.description }
+
+func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, k.From(l), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt64) Get(lm label.Map) uint64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
+
+// Float32 represents a key for float32 values.
+type Float32 struct {
+ name string
+ description string
+}
+
+// NewFloat32 creates a new Key for float32 values.
+func NewFloat32(name, description string) *Float32 {
+ return &Float32{name: name, description: description}
+}
+
+func (k *Float32) Name() string { return k.name }
+func (k *Float32) Description() string { return k.description }
+
+func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float32) Of(v float32) label.Label {
+ return label.Of64(k, uint64(math.Float32bits(v)))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float32) Get(lm label.Map) float32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+ return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key for float64 values.
+type Float64 struct {
+ name string
+ description string
+}
+
+// NewFloat64 creates a new Key for float64 values.
+func NewFloat64(name, description string) *Float64 {
+ return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+ return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+ return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key for string values.
+type String struct {
+ name string
+ description string
+}
+
+// NewString creates a new Key for string values.
+func NewString(name, description string) *String {
+ return &String{name: name, description: description}
+}
+
+func (k *String) Name() string { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key for bool values.
+type Boolean struct {
+ name string
+ description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+ return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Boolean) Of(v bool) label.Label {
+ if v {
+ return label.Of64(k, 1)
+ }
+ return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key for error values.
+type Error struct {
+ name string
+ description string
+}
+
+// NewError creates a new Key for error values.
+func NewError(name, description string) *Error {
+ return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+ io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+ err, _ := t.UnpackValue().(error)
+ return err
+}
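
A short sketch of the typed-key round trip under these assumptions (the keys and function below are hypothetical): Of packs a value into a Label, From unpacks it, and Get reads it out of any label.Map with a zero-value fallback.

package example

import (
	"fmt"

	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

var (
	// Hypothetical keys used only for this illustration.
	requests = keys.NewInt64("requests", "number of requests handled")
	healthy  = keys.NewBoolean("healthy", "whether the service is healthy")
)

func roundTrip() {
	// Of packs the value into the Label's uint64 field; From unpacks it.
	l := requests.Of(42)
	fmt.Println(requests.From(l)) // 42

	// Get looks the key up in a label.Map and falls back to the zero value.
	m := label.NewMap(requests.Of(7), healthy.Of(true))
	fmt.Println(requests.Get(m), healthy.Get(m)) // 7 true
}
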
diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 0000000000..7e95866592
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+ // Msg is a key used to add message strings to label lists.
+ Msg = NewString("message", "a readable message")
+ // Label is a key used to indicate an event adds labels to the context.
+ Label = NewTag("label", "a label context marker")
+ // Start is used for things like traces that have a name.
+ Start = NewString("start", "span start")
+	// End is a key used to indicate an event marks the end of a span.
+	End = NewTag("end", "a span end marker")
+	// Detach is a key used to indicate an event detaches the current span from its parent.
+	Detach = NewTag("detach", "a span detach marker")
+ // Err is a key used to add error values to label lists.
+ Err = NewError("error", "an error that occurred")
+ // Metric is a key used to indicate an event records metrics.
+ Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go
new file mode 100644
index 0000000000..c0e8e731c9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/util.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+ "sort"
+ "strings"
+)
+
+// Join returns a canonical join of the keys in S:
+// a sorted comma-separated string list.
+func Join[S ~[]T, T ~string](s S) string {
+ strs := make([]string, 0, len(s))
+ for _, v := range s {
+ strs = append(strs, string(v))
+ }
+ sort.Strings(strs)
+ return strings.Join(strs, ",")
+}
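
A small illustration of Join (the named type and function below are hypothetical): thanks to the ~ constraints, any defined string-slice type works, and the output is sorted so it is stable regardless of input order.

package example

import (
	"fmt"

	"golang.org/x/tools/internal/event/keys"
)

type pkgID string // hypothetical defined string type

func joinExample() {
	// Prints "golang.org/x/a,golang.org/x/b": the elements are sorted first.
	fmt.Println(keys.Join([]pkgID{"golang.org/x/b", "golang.org/x/a"}))
}
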
diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 0000000000..0f526e1f9a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,215 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only; the name should be unique
+// for communicating with external systems, but that is not required or enforced.
+type Key interface {
+ // Name returns the key name.
+ Name() string
+ // Description returns a string that can be used to describe the value.
+ Description() string
+
+ // Format is used in formatting to append the value of the label to the
+ // supplied buffer.
+ // The formatter may use the supplied buf as a scratch area to avoid
+ // allocations.
+ Format(w io.Writer, buf []byte, l Label)
+}
+
+// Label holds a key and value pair.
+// It is normally used when passing around lists of labels.
+type Label struct {
+ key Key
+ packed uint64
+ untyped interface{}
+}
+
+// Map is the interface to a collection of Labels indexed by key.
+type Map interface {
+ // Find returns the label that matches the supplied key.
+ Find(key Key) Label
+}
+
+// List is the interface to something that provides an iterable
+// list of labels.
+// Iteration should start from 0 and continue until Valid returns false.
+type List interface {
+ // Valid returns true if the index is within range for the list.
+ // It does not imply the label at that index will itself be valid.
+ Valid(index int) bool
+ // Label returns the label at the given index.
+ Label(index int) Label
+}
+
+// list implements LabelList for a list of Labels.
+type list struct {
+ labels []Label
+}
+
+// filter wraps a LabelList filtering out specific labels.
+type filter struct {
+ keys []Key
+ underlying List
+}
+
+// listMap implements LabelMap for a simple list of labels.
+type listMap struct {
+ labels []Label
+}
+
+// mapChain implements LabelMap for a list of underlying LabelMap.
+type mapChain struct {
+ maps []Map
+}
+
+// OfValue creates a new label from the key and value.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
+
+// UnpackValue assumes the label was built using OfValue and returns the value
+// that was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) UnpackValue() interface{} { return t.untyped }
+
+// Of64 creates a new label from a key and a uint64. This is often
+// used for non uint64 values that can be packed into a uint64.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
+
+// Unpack64 assumes the label was built using Of64 and returns the value that
+// was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) Unpack64() uint64 { return t.packed }
+
+type stringptr unsafe.Pointer
+
+// OfString creates a new label from a key and a string.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func OfString(k Key, v string) Label {
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ return Label{
+ key: k,
+ packed: uint64(hdr.Len),
+ untyped: stringptr(hdr.Data),
+ }
+}
+
+// UnpackString assumes the label was built using OfString and returns the
+// value that was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) UnpackString() string {
+ var v string
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ hdr.Data = uintptr(t.untyped.(stringptr))
+ hdr.Len = int(t.packed)
+ return v
+}
+
+// Valid returns true if the Label is a valid one (it has a key).
+func (t Label) Valid() bool { return t.key != nil }
+
+// Key returns the key of this Label.
+func (t Label) Key() Key { return t.key }
+
+// Format is used for debug printing of labels.
+func (t Label) Format(f fmt.State, r rune) {
+ if !t.Valid() {
+ io.WriteString(f, `nil`)
+ return
+ }
+ io.WriteString(f, t.Key().Name())
+ io.WriteString(f, "=")
+ var buf [128]byte
+ t.Key().Format(f, buf[:0], t)
+}
+
+func (l *list) Valid(index int) bool {
+ return index >= 0 && index < len(l.labels)
+}
+
+func (l *list) Label(index int) Label {
+ return l.labels[index]
+}
+
+func (f *filter) Valid(index int) bool {
+ return f.underlying.Valid(index)
+}
+
+func (f *filter) Label(index int) Label {
+ l := f.underlying.Label(index)
+ for _, f := range f.keys {
+ if l.Key() == f {
+ return Label{}
+ }
+ }
+ return l
+}
+
+func (lm listMap) Find(key Key) Label {
+ for _, l := range lm.labels {
+ if l.Key() == key {
+ return l
+ }
+ }
+ return Label{}
+}
+
+func (c mapChain) Find(key Key) Label {
+ for _, src := range c.maps {
+ l := src.Find(key)
+ if l.Valid() {
+ return l
+ }
+ }
+ return Label{}
+}
+
+var emptyList = &list{}
+
+func NewList(labels ...Label) List {
+ if len(labels) == 0 {
+ return emptyList
+ }
+ return &list{labels: labels}
+}
+
+func Filter(l List, keys ...Key) List {
+ if len(keys) == 0 {
+ return l
+ }
+ return &filter{keys: keys, underlying: l}
+}
+
+func NewMap(labels ...Label) Map {
+ return listMap{labels: labels}
+}
+
+func MergeMaps(srcs ...Map) Map {
+ var nonNil []Map
+ for _, src := range srcs {
+ if src != nil {
+ nonNil = append(nonNil, src)
+ }
+ }
+ if len(nonNil) == 1 {
+ return nonNil[0]
+ }
+ return mapChain{maps: nonNil}
+}
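
A sketch of how the Map and List helpers compose (hypothetical keys and function, assuming the packages above): MergeMaps consults its arguments in order, and Filter hides keys from a List without copying it.

package example

import (
	"fmt"

	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

func mapsExample() {
	// Hypothetical keys used only for this illustration.
	user := keys.NewString("user", "")
	region := keys.NewString("region", "")

	base := label.NewMap(region.Of("eu"))
	req := label.NewMap(user.Of("alice"))

	// The first map that has the key wins, so req shadows base.
	m := label.MergeMaps(req, base)
	fmt.Println(user.Get(m), region.Get(m)) // alice eu

	// Filter returns an invalid Label for filtered keys instead of shrinking the list.
	l := label.Filter(label.NewList(user.Of("alice"), region.Of("eu")), user)
	fmt.Println(l.Label(0).Valid(), l.Label(1).Valid()) // false true
}
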
diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go
new file mode 100644
index 0000000000..581b26c204
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go
@@ -0,0 +1,59 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tag provides the labels used for telemetry throughout gopls.
+package tag
+
+import (
+ "golang.org/x/tools/internal/event/keys"
+)
+
+var (
+ // create the label keys we use
+ Method = keys.NewString("method", "")
+ StatusCode = keys.NewString("status.code", "")
+ StatusMessage = keys.NewString("status.message", "")
+ RPCID = keys.NewString("id", "")
+ RPCDirection = keys.NewString("direction", "")
+ File = keys.NewString("file", "")
+ Directory = keys.New("directory", "")
+ URI = keys.New("URI", "")
+ Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs
+ PackagePath = keys.NewString("package_path", "")
+ Query = keys.New("query", "")
+ Snapshot = keys.NewUInt64("snapshot", "")
+ Operation = keys.NewString("operation", "")
+
+ Position = keys.New("position", "")
+ Category = keys.NewString("category", "")
+ PackageCount = keys.NewInt("packages", "")
+ Files = keys.New("files", "")
+ Port = keys.NewInt("port", "")
+ Type = keys.New("type", "")
+ HoverKind = keys.NewString("hoverkind", "")
+
+ NewServer = keys.NewString("new_server", "A new server was added")
+ EndServer = keys.NewString("end_server", "A server was shut down")
+
+ ServerID = keys.NewString("server", "The server ID an event is related to")
+ Logfile = keys.NewString("logfile", "")
+ DebugAddress = keys.NewString("debug_address", "")
+ GoplsPath = keys.NewString("gopls_path", "")
+ ClientID = keys.NewString("client_id", "")
+
+ Level = keys.NewInt("level", "The logging level")
+)
+
+var (
+ // create the stats we measure
+ Started = keys.NewInt64("started", "Count of started RPCs.")
+ ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes)
+ SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes)
+ Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds)
+)
+
+const (
+ Inbound = "in"
+ Outbound = "out"
+)
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
new file mode 100644
index 0000000000..d98b0db2a9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -0,0 +1,150 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the remaining vestiges of
+// $GOROOT/src/go/internal/gcimporter/bimport.go.
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "sync"
+)
+
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Sprintf(format, args...))
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*fileInfo
+}
+
+type fileInfo struct {
+ file *token.File
+ lastline int
+}
+
+const maxlines = 64 * 1024
+
+func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
+ // TODO(mdempsky): Make use of column.
+
+ // Since we don't know the set of needed file positions, we reserve maxlines
+ // positions per file. We delay calling token.File.SetLines until all
+ // positions have been calculated (by way of fakeFileSet.setLines), so that
+ // we can avoid setting unnecessary lines. See also golang/go#46586.
+ f := s.files[file]
+ if f == nil {
+ f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
+ s.files[file] = f
+ }
+ if line > maxlines {
+ line = 1
+ }
+ if line > f.lastline {
+ f.lastline = line
+ }
+
+ // Return a fake position assuming that f.file consists only of newlines.
+ return token.Pos(f.file.Base() + line - 1)
+}
+
+func (s *fakeFileSet) setLines() {
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ for _, f := range s.files {
+ f.file.SetLines(fakeLines[:f.lastline])
+ }
+}
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func chanDir(d int) types.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types.RecvOnly
+ case 2 /* Csend */ :
+ return types.SendOnly
+ case 3 /* Cboth */ :
+ return types.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
+
+var predeclOnce sync.Once
+var predecl []types.Type // initialized lazily
+
+func predeclared() []types.Type {
+ predeclOnce.Do(func() {
+ // initialize lazily to be sure that all
+ // elements have been initialized before
+ predecl = []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+ }
+ predecl = append(predecl, additionalPredeclared()...)
+ })
+ return predecl
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
new file mode 100644
index 0000000000..f6437feb1c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -0,0 +1,99 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
+
+// This file implements FindExportData.
+
+package gcimporter
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
+ // See $GOROOT/include/ar.h.
+ hdr := make([]byte, 16+12+6+6+8+10+2)
+ _, err = io.ReadFull(r, hdr)
+ if err != nil {
+ return
+ }
+ // leave for debugging
+ if false {
+ fmt.Printf("header: %s", hdr)
+ }
+ s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+ length, err := strconv.Atoi(s)
+ size = int64(length)
+ if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+ err = fmt.Errorf("invalid archive header")
+ return
+ }
+ name = strings.TrimSpace(string(hdr[:16]))
+ return
+}
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function. The hdr result
+// is the string before the export data, either "$$" or "$$B".
+// The size result is the length of the export data in bytes, or -1 if not known.
+func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+
+ if string(line) == "!\n" {
+ // Archive file. Scan to __.PKGDEF.
+ var name string
+ if name, size, err = readGopackHeader(r); err != nil {
+ return
+ }
+
+ // First entry should be __.PKGDEF.
+ if name != "__.PKGDEF" {
+ err = fmt.Errorf("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ size -= int64(len(line))
+ }
+
+ // Now at __.PKGDEF in archive or still at beginning of file.
+ // Either way, line should begin with "go object ".
+ if !strings.HasPrefix(string(line), "go object ") {
+ err = fmt.Errorf("not a Go object file")
+ return
+ }
+
+ // Skip over object header to export data.
+ // Begins after first line starting with $$.
+ for line[0] != '$' {
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ size -= int64(len(line))
+ }
+ hdr = string(line)
+ if size < 0 {
+ size = -1
+ }
+
+ return
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
new file mode 100644
index 0000000000..2d078ccb19
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -0,0 +1,273 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
+
+// Package gcimporter provides various functions for reading
+// gc-generated object files that can be used to implement the
+// Importer interface defined by the Go 1.5 standard library package.
+//
+// The encoding is deterministic: if the encoder is applied twice to
+// the same types.Package data structure, both encodings are equal.
+// This property may be important to avoid spurious changes in
+// applications such as build systems.
+//
+// However, the encoder is not necessarily idempotent. Importing an
+// exported package may yield a types.Package that, while it
+// represents the same set of Go types as the original, may differ in
+// the details of its internal representation. Because of these
+// differences, re-encoding the imported package may yield a
+// different, but equally valid, encoding of the package.
+package gcimporter // import "golang.org/x/tools/internal/gcimporter"
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/build"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+const (
+ // Enable debug during development: it adds some additional checks, and
+ // prevents errors from being recovered.
+ debug = false
+
+ // If trace is set, debugging output is printed to std out.
+ trace = false
+)
+
+var exportMap sync.Map // package dir → func() (string, bool)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+func lookupGorootExport(pkgDir string) (string, bool) {
+ f, ok := exportMap.Load(pkgDir)
+ if !ok {
+ var (
+ listOnce sync.Once
+ exportPath string
+ )
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
+ listOnce.Do(func() {
+ cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
+ cmd.Dir = build.Default.GOROOT
+ var output []byte
+ output, err := cmd.Output()
+ if err != nil {
+ return
+ }
+
+ exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+ if len(exports) != 1 {
+ return
+ }
+
+ exportPath = exports[0]
+ })
+
+ return exportPath, exportPath != ""
+ })
+ }
+
+ return f.(func() (string, bool))()
+}
+
+var pkgExts = [...]string{".a", ".o"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+// If no file was found, an empty filename is returned.
+func FindPkg(path, srcDir string) (filename, id string) {
+ if path == "" {
+ return
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ var ok bool
+ if bp.Goroot && bp.Dir != "" {
+ filename, ok = lookupGorootExport(bp.Dir)
+ }
+ if !ok {
+ id = path // make sure we have an id to print in error message
+ return
+ }
+ } else {
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ id = bp.ImportPath
+ }
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ if filename != "" {
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+ var rc io.ReadCloser
+ var filename, id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ filename, id = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %q", id)
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ var hdr string
+ var size int64
+ buf := bufio.NewReader(rc)
+ if hdr, size, err = FindExportData(buf); err != nil {
+ return
+ }
+
+ switch hdr {
+ case "$$B\n":
+ var data []byte
+ data, err = io.ReadAll(buf)
+ if err != nil {
+ break
+ }
+
+ // TODO(gri): allow clients of go/importer to provide a FileSet.
+ // Or, define a new standard go/types/gcexportdata package.
+ fset := token.NewFileSet()
+
+ // Select appropriate importer.
+ if len(data) > 0 {
+ switch data[0] {
+ case 'v', 'c', 'd': // binary, till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+ case 'i': // indexed, till go1.19
+ _, pkg, err := IImportData(fset, packages, data[1:], id)
+ return pkg, err
+
+ case 'u': // unified, from go1.20
+ _, pkg, err := UImportData(fset, packages, data[1:size], id)
+ return pkg, err
+
+ default:
+ l := len(data)
+ if l > 10 {
+ l = 10
+ }
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
+ }
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ }
+
+ return
+}
+
+func deref(typ types.Type) types.Type {
+ if p, _ := typ.(*types.Pointer); p != nil {
+ return p.Elem()
+ }
+ return typ
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
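
For context, a minimal sketch of driving Import from client code (illustrative only; gcimporter is an internal package, and the function name below is hypothetical):

package example

import (
	"fmt"
	"go/types"
	"log"

	"golang.org/x/tools/internal/gcimporter"
)

func importFmt() {
	// The packages map acts as the import cache; a nil lookup means FindPkg
	// resolves the path via the local build context.
	pkgs := make(map[string]*types.Package)
	pkg, err := gcimporter.Import(pkgs, "fmt", ".", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Complete())
}
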
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
new file mode 100644
index 0000000000..2ee8c70164
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -0,0 +1,1321 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/tokeninternal"
+)
+
+// IExportShallow encodes "shallow" export data for the specified package.
+//
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IImportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during export.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
+ // In principle this operation can only fail if out.Write fails,
+ // but that's impossible for bytes.Buffer---and as a matter of
+ // fact iexportCommon doesn't even check for I/O errors.
+ // TODO(adonovan): handle I/O errors properly.
+ // TODO(adonovan): use byte slices throughout, avoiding copying.
+ const bundle, shallow = false, true
+ var out bytes.Buffer
+ err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+ return out.Bytes(), err
+}
+
+// IImportShallow decodes "shallow" types.Package data encoded by
+// IExportShallow in the same executable. This function cannot import data from
+// cmd/compile or gcexportdata.Write.
+//
+// The importer calls getPackages to obtain package symbols for all
+// packages mentioned in the export data, including the one being
+// decoded.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during import.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) {
+ const bundle = false
+ const shallow = true
+ pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf)
+ if err != nil {
+ return nil, err
+ }
+ return pkgs[0], nil
+}
+
+// ReportFunc is the type of a function used to report formatted bugs.
+type ReportFunc = func(string, ...interface{})
+
+// Current bundled export format version. Increase with each format change.
+// 0: initial implementation
+const bundleVersion = 0
+
+// IExportData writes indexed export data for pkg to out.
+//
+// If no file set is provided, position info will be missing.
+// The package path of the top-level package will not be recorded,
+// so that calls to IImportData can override with a provided package path.
+func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ const bundle, shallow = false, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+}
+
+// IExportBundle writes an indexed export bundle for pkgs to out.
+func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
+ const bundle, shallow = true, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs)
+}
+
+func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) {
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if ierr, ok := e.(internalError); ok {
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+ }
+
+ p := iexporter{
+ fset: fset,
+ version: version,
+ shallow: shallow,
+ allPkgs: map[*types.Package]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[types.Object]uint64{},
+ tparamNames: map[types.Object]string{},
+ typIndex: map[types.Type]uint64{},
+ }
+ if !bundle {
+ p.localpkg = pkgs[0]
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+ }
+
+ // Initialize work queue with exported declarations.
+ for _, pkg := range pkgs {
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if token.IsExported(name) {
+ p.pushDecl(scope.Lookup(name))
+ }
+ }
+
+ if bundle {
+ // Ensure pkg and its imports are included in the index.
+ p.allPkgs[pkg] = true
+ for _, imp := range pkg.Imports() {
+ p.allPkgs[imp] = true
+ }
+ }
+ }
+
+ // Loop until no more work.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popHead())
+ }
+
+ // Produce index of offset of each file record in files.
+ var files intWriter
+ var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
+ if p.shallow {
+ fileOffset = make([]uint64, len(p.fileInfos))
+ for i, info := range p.fileInfos {
+ fileOffset[i] = uint64(files.Len())
+ p.encodeFile(&files, info.file, info.needed)
+ }
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex)
+
+ if bundle {
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.pkg(pkg)
+ imps := pkg.Imports()
+ w.uint64(uint64(len(imps)))
+ for _, imp := range imps {
+ w.pkg(imp)
+ }
+ }
+ }
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ if bundle {
+ hdr.uint64(bundleVersion)
+ }
+ hdr.uint64(uint64(p.version))
+ hdr.uint64(uint64(p.strings.Len()))
+ if p.shallow {
+ hdr.uint64(uint64(files.Len()))
+ hdr.uint64(uint64(len(fileOffset)))
+ for _, offset := range fileOffset {
+ hdr.uint64(offset)
+ }
+ }
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ io.Copy(out, &hdr)
+ io.Copy(out, &p.strings)
+ if p.shallow {
+ io.Copy(out, &files)
+ }
+ io.Copy(out, &p.data0)
+
+ return nil
+}
+
+// encodeFile writes to w a representation of the file sufficient to
+// faithfully restore position information about all needed offsets.
+// Mutates the needed array.
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
+ _ = needed[0] // precondition: needed is non-empty
+
+ w.uint64(p.stringOff(file.Name()))
+
+ size := uint64(file.Size())
+ w.uint64(size)
+
+ // Sort the set of needed offsets. Duplicates are harmless.
+ sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
+
+ lines := tokeninternal.GetLines(file) // byte offset of each line start
+ w.uint64(uint64(len(lines)))
+
+ // Rather than record the entire array of line start offsets,
+ // we save only a sparse list of (index, offset) pairs for
+ // the start of each line that contains a needed position.
+ var sparse [][2]int // (index, offset) pairs
+outer:
+ for i, lineStart := range lines {
+ lineEnd := size
+ if i < len(lines)-1 {
+ lineEnd = uint64(lines[i+1])
+ }
+		// Does this line contain a needed offset?
+ if needed[0] < lineEnd {
+ sparse = append(sparse, [2]int{i, lineStart})
+ for needed[0] < lineEnd {
+ needed = needed[1:]
+ if len(needed) == 0 {
+ break outer
+ }
+ }
+ }
+ }
+
+ // Delta-encode the columns.
+ w.uint64(uint64(len(sparse)))
+ var prev [2]int
+ for _, pair := range sparse {
+ w.uint64(uint64(pair[0] - prev[0]))
+ w.uint64(uint64(pair[1] - prev[1]))
+ prev = pair
+ }
+}
+
+// writeIndex writes out an object index. The index is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+ type pkgObj struct {
+ obj types.Object
+ name string // qualified name; differs from obj.Name for type params
+ }
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Package][]pkgObj{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ if w.p.localpkg != nil {
+ pkgObjs[w.p.localpkg] = nil
+ }
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+
+ for obj := range index {
+ name := w.p.exportName(obj)
+ pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
+ }
+
+ var pkgs []*types.Package
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].name < objs[j].name
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(w.exportPath(pkg))
+ w.string(pkg.Name())
+ w.uint64(uint64(0)) // package height is not needed for go/types
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, obj := range objs {
+ w.string(obj.name)
+ w.uint64(index[obj.obj])
+ }
+ }
+}
+
+// exportName returns the 'exported' name of an object. It differs from
+// obj.Name() only for type parameters (see tparamExportName for details).
+func (p *iexporter) exportName(obj types.Object) (res string) {
+ if name := p.tparamNames[obj]; name != "" {
+ return name
+ }
+ return obj.Name()
+}
+
+type iexporter struct {
+ fset *token.FileSet
+ out *bytes.Buffer
+ version int
+
+ shallow bool // don't put types from other packages in the index
+ objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated
+ localpkg *types.Package // (nil in bundle mode)
+
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can ensure to include them in the
+ // main index.
+ allPkgs map[*types.Package]bool
+
+ declTodo objQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ // In shallow mode, object positions are encoded as (file, offset).
+ // Each file is recorded as a line-number table.
+ // Only the lines of needed positions are saved faithfully.
+ fileInfo map[*token.File]uint64 // value is index in fileInfos
+ fileInfos []*filePositions
+
+ data0 intWriter
+ declIndex map[types.Object]uint64
+ tparamNames map[types.Object]string // typeparam->exported name
+ typIndex map[types.Type]uint64
+
+ indent int // for tracing support
+}
+
+type filePositions struct {
+ file *token.File
+ needed []uint64 // unordered list of needed file offsets
+}
+
+func (p *iexporter) trace(format string, args ...interface{}) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+// objectpathEncoder returns the lazily allocated objectpath.Encoder to use
+// when encoding objects in other packages during shallow export.
+//
+// Using a shared Encoder amortizes some of cost of objectpath search.
+func (p *iexporter) objectpathEncoder() *objectpath.Encoder {
+ if p.objEncoder == nil {
+ p.objEncoder = new(objectpath.Encoder)
+ }
+ return p.objEncoder
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
+
+// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
+ index, ok := p.fileInfo[file]
+ if !ok {
+ index = uint64(len(p.fileInfo))
+ p.fileInfos = append(p.fileInfos, &filePositions{file: file})
+ if p.fileInfo == nil {
+ p.fileInfo = make(map[*token.File]uint64)
+ }
+ p.fileInfo[file] = index
+ }
+ // Record each needed offset.
+ info := p.fileInfos[index]
+ offset := uint64(file.Offset(pos))
+ info.needed = append(info.needed, offset)
+
+ return index, offset
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+ // Package unsafe is known to the compiler and predeclared.
+ // Caller should not ask us to export it.
+ if obj.Pkg() == types.Unsafe {
+ panic("cannot export package unsafe")
+ }
+
+ // Shallow export data: don't index decls from other packages.
+ if p.shallow && obj.Pkg() != p.localpkg {
+ return
+ }
+
+ if _, ok := p.declIndex[obj]; ok {
+ return
+ }
+
+ p.declIndex[obj] = ^uint64(0) // mark obj present in work queue
+ p.declTodo.pushTail(obj)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+func (w *exportWriter) exportPath(pkg *types.Package) string {
+ if pkg == w.p.localpkg {
+ return ""
+ }
+ return pkg.Path()
+}
+
+func (p *iexporter) doDecl(obj types.Object) {
+ if trace {
+ p.trace("exporting decl %v (%T)", obj, obj)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", obj)
+ }()
+ }
+ w := p.newWriter()
+
+ switch obj := obj.(type) {
+ case *types.Var:
+ w.tag('V')
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+
+ case *types.Func:
+ sig, _ := obj.Type().(*types.Signature)
+ if sig.Recv() != nil {
+ // We shouldn't see methods in the package scope,
+ // but the type checker may repair "func () F() {}"
+ // to "func (Invalid) F()" and then treat it like "func F()",
+ // so allow that. See golang/go#57729.
+ if sig.Recv().Type() != types.Typ[types.Invalid] {
+ panic(internalErrorf("unexpected method: %v", sig))
+ }
+ }
+
+ // Function.
+ if sig.TypeParams().Len() == 0 {
+ w.tag('F')
+ } else {
+ w.tag('G')
+ }
+ w.pos(obj.Pos())
+ // The tparam list of the function type is the declaration of the type
+ // params. So, write out the type params right now. Then those type params
+ // will be referenced via their type offset (via typOff) in all other
+ // places in the signature and function where they are used.
+ //
+ // While importing the type parameters, tparamList computes and records
+ // their export name, so that it can be later used when writing the index.
+ if tparams := sig.TypeParams(); tparams.Len() > 0 {
+ w.tparamList(obj.Name(), tparams, obj.Pkg())
+ }
+ w.signature(sig)
+
+ case *types.Const:
+ w.tag('C')
+ w.pos(obj.Pos())
+ w.value(obj.Type(), obj.Val())
+
+ case *types.TypeName:
+ t := obj.Type()
+
+ if tparam, ok := t.(*types.TypeParam); ok {
+ w.tag('P')
+ w.pos(obj.Pos())
+ constraint := tparam.Constraint()
+ if p.version >= iexportVersionGo1_18 {
+ implicit := false
+ if iface, _ := constraint.(*types.Interface); iface != nil {
+ implicit = iface.IsImplicit()
+ }
+ w.bool(implicit)
+ }
+ w.typ(constraint, obj.Pkg())
+ break
+ }
+
+ if obj.IsAlias() {
+ w.tag('A')
+ w.pos(obj.Pos())
+ w.typ(t, obj.Pkg())
+ break
+ }
+
+ // Defined type.
+ named, ok := t.(*types.Named)
+ if !ok {
+ panic(internalErrorf("%s is not a defined type", t))
+ }
+
+ if named.TypeParams().Len() == 0 {
+ w.tag('T')
+ } else {
+ w.tag('U')
+ }
+ w.pos(obj.Pos())
+
+ if named.TypeParams().Len() > 0 {
+ // While importing the type parameters, tparamList computes and records
+ // their export name, so that it can be later used when writing the index.
+ w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg())
+ }
+
+ underlying := obj.Type().Underlying()
+ w.typ(underlying, obj.Pkg())
+
+ if types.IsInterface(t) {
+ break
+ }
+
+ n := named.NumMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := named.Method(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+
+ // Receiver type parameters are type arguments of the receiver type, so
+ // their name must be qualified before exporting recv.
+ if rparams := sig.RecvTypeParams(); rparams.Len() > 0 {
+ prefix := obj.Name() + "." + m.Name()
+ for i := 0; i < rparams.Len(); i++ {
+ rparam := rparams.At(i)
+ name := tparamExportName(prefix, rparam)
+ w.p.tparamNames[rparam.Obj()] = name
+ }
+ }
+ w.param(sig.Recv())
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected object: %v", obj))
+ }
+
+ p.declIndex[obj] = w.flush()
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
+func (w *exportWriter) pos(pos token.Pos) {
+ if w.p.shallow {
+ w.posV2(pos)
+ } else if w.p.version >= iexportVersionPosCol {
+ w.posV1(pos)
+ } else {
+ w.posV0(pos)
+ }
+}
+
+// posV2 encoding (used only in shallow mode) records positions as
+// (file, offset), where file is the index in the token.File table
+// (which records the file name and newline offsets) and offset is a
+// byte offset. It effectively ignores //line directives.
+func (w *exportWriter) posV2(pos token.Pos) {
+ if pos == token.NoPos {
+ w.uint64(0)
+ return
+ }
+ file := w.p.fset.File(pos) // fset must be non-nil
+ index, offset := w.p.fileIndexAndOffset(file, pos)
+ w.uint64(1 + index)
+ w.uint64(offset)
+}
+
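+// posV1 encoding writes the column delta shifted left by one bit; the low
+// bit signals that a line delta follows, whose own low bit in turn signals
+// that a new file name follows. In the common case of a position on the
+// same file and line, only a single varint is written.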
+func (w *exportWriter) posV1(pos token.Pos) {
+ if w.p.fset == nil {
+ w.int64(0)
+ return
+ }
+
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+ column := int64(p.Column)
+
+ deltaColumn := (column - w.prevColumn) << 1
+ deltaLine := (line - w.prevLine) << 1
+
+ if file != w.prevFile {
+ deltaLine |= 1
+ }
+ if deltaLine != 0 {
+ deltaColumn |= 1
+ }
+
+ w.int64(deltaColumn)
+ if deltaColumn&1 != 0 {
+ w.int64(deltaLine)
+ if deltaLine&1 != 0 {
+ w.string(file)
+ }
+ }
+
+ w.prevFile = file
+ w.prevLine = line
+ w.prevColumn = column
+}
+
+func (w *exportWriter) posV0(pos token.Pos) {
+ if w.p.fset == nil {
+ w.int64(0)
+ return
+ }
+
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+
+ // When file is the same as the last position (common case),
+ // we can save a few bytes by delta encoding just the line
+ // number.
+ //
+ // Note: Because data objects may be read out of order (or not
+ // at all), we can only apply delta encoding within a single
+ // object. This is handled implicitly by tracking prevFile and
+ // prevLine as fields of exportWriter.
+
+ if file == w.prevFile {
+ delta := line - w.prevLine
+ w.int64(delta)
+ if delta == deltaNewFile {
+ w.int64(-1)
+ }
+ } else {
+ w.int64(deltaNewFile)
+ w.int64(line) // line >= 0
+ w.string(file)
+ w.prevFile = file
+ }
+ w.prevLine = line
+}
+
+func (w *exportWriter) pkg(pkg *types.Package) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(w.exportPath(pkg))
+}
+
+func (w *exportWriter) qualifiedType(obj *types.TypeName) {
+ name := w.p.exportName(obj)
+
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(obj)
+ w.string(name)
+ w.pkg(obj.Pkg())
+}
+
+// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass
+// it in explicitly into signatures and structs that may use it for
+// constructing fields.
+func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
+ w.data.uint64(w.p.typOff(t, pkg))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
+func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t, pkg)
+ off = predeclReserved + w.flush()
+ p.typIndex[t] = off
+ }
+ return off
+}
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
+ if trace {
+ w.p.trace("exporting type %s (%T)", t, t)
+ w.p.indent++
+ defer func() {
+ w.p.indent--
+ w.p.trace("=> %s", t)
+ }()
+ }
+ switch t := t.(type) {
+ case *types.Named:
+ if targs := t.TypeArgs(); targs.Len() > 0 {
+ w.startType(instanceType)
+ // TODO(rfindley): investigate if this position is correct, and if it
+ // matters.
+ w.pos(t.Obj().Pos())
+ w.typeList(targs, pkg)
+ w.typ(t.Origin(), pkg)
+ return
+ }
+ w.startType(definedType)
+ w.qualifiedType(t.Obj())
+
+ case *types.TypeParam:
+ w.startType(typeParamType)
+ w.qualifiedType(t.Obj())
+
+ case *types.Pointer:
+ w.startType(pointerType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Slice:
+ w.startType(sliceType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Array:
+ w.startType(arrayType)
+ w.uint64(uint64(t.Len()))
+ w.typ(t.Elem(), pkg)
+
+ case *types.Chan:
+ w.startType(chanType)
+ // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+ var dir uint64
+ switch t.Dir() {
+ case types.RecvOnly:
+ dir = 1
+ case types.SendOnly:
+ dir = 2
+ case types.SendRecv:
+ dir = 3
+ }
+ w.uint64(dir)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Map:
+ w.startType(mapType)
+ w.typ(t.Key(), pkg)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Signature:
+ w.startType(signatureType)
+ w.pkg(pkg)
+ w.signature(t)
+
+ case *types.Struct:
+ w.startType(structType)
+ n := t.NumFields()
+ // Even for struct{} we must emit some qualifying package, because that's
+ // what the compiler does, and thus that's what the importer expects.
+ fieldPkg := pkg
+ if n > 0 {
+ fieldPkg = t.Field(0).Pkg()
+ }
+ if fieldPkg == nil {
+ // TODO(rfindley): improve this very hacky logic.
+ //
+ // The importer expects a package to be set for all struct types, even
+ // those with no fields. A better encoding might be to set NumFields
+ // before pkg. setPkg panics with a nil package, which may be possible
+ // to reach with invalid packages (and perhaps valid packages, too?), so
+ // (arbitrarily) set the localpkg if available.
+ //
+ // Alternatively, we may be able to simply guarantee that pkg != nil, by
+ // reconsidering the encoding of constant values.
+ if w.p.shallow {
+ fieldPkg = w.p.localpkg
+ } else {
+ panic(internalErrorf("no package to set for empty struct"))
+ }
+ }
+ w.pkg(fieldPkg)
+ w.uint64(uint64(n))
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if w.p.shallow {
+ w.objectPath(f)
+ }
+ w.pos(f.Pos())
+ w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg
+ w.typ(f.Type(), fieldPkg)
+ w.bool(f.Anonymous())
+ w.string(t.Tag(i)) // note (or tag)
+ }
+
+ case *types.Interface:
+ w.startType(interfaceType)
+ w.pkg(pkg)
+
+ n := t.NumEmbeddeds()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ ft := t.EmbeddedType(i)
+ tPkg := pkg
+ if named, _ := ft.(*types.Named); named != nil {
+ w.pos(named.Obj().Pos())
+ } else {
+ w.pos(token.NoPos)
+ }
+ w.typ(ft, tPkg)
+ }
+
+ // See comment for struct fields. In shallow mode we change the encoding
+ // for interface methods that are promoted from other packages.
+
+ n = t.NumExplicitMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := t.ExplicitMethod(i)
+ if w.p.shallow {
+ w.objectPath(m)
+ }
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.signature(sig)
+ }
+
+ case *types.Union:
+ w.startType(unionType)
+ nt := t.Len()
+ w.uint64(uint64(nt))
+ for i := 0; i < nt; i++ {
+ term := t.Term(i)
+ w.bool(term.Tilde())
+ w.typ(term.Type(), pkg)
+ }
+
+ default:
+ panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
+ }
+}
+
+// objectPath writes the package and objectPath to use to look up obj in a
+// different package, when encoding in "shallow" mode.
+//
+// When doing a shallow import, the importer creates only the local package,
+// and requests package symbols for dependencies from the client.
+// However, certain types defined in the local package may hold objects defined
+// (perhaps deeply) within another package.
+//
+// For example, consider the following:
+//
+// package a
+// func F() chan * map[string] struct { X int }
+//
+// package b
+// import "a"
+// var B = a.F()
+//
+// In this example, the type of b.B holds fields defined in package a.
+// In order to have the correct canonical objects for the field defined in the
+// type of B, they are encoded as objectPaths and later looked up in the
+// importer. The same problem applies to interface methods.
+func (w *exportWriter) objectPath(obj types.Object) {
+ if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg {
+ // obj.Pkg() may be nil for the builtin error.Error.
+ // In this case, or if obj is declared in the local package, no need to
+ // encode.
+ w.string("")
+ return
+ }
+ objectPath, err := w.p.objectpathEncoder().For(obj)
+ if err != nil {
+ // Fall back to the empty string, which will cause the importer to create a
+ // new object, which matches earlier behavior. Creating a new object is
+ // sufficient for many purposes (such as type checking), but causes certain
+ // references algorithms to fail (golang/go#60819). However, we didn't
+ // notice this problem during months of gopls@v0.12.0 testing.
+ //
+ // TODO(golang/go#61674): this workaround is insufficient, as in the case
+ // where the field forwarded from an instantiated type that may not appear
+ // in the export data of the original package:
+ //
+ // // package a
+ // type A[P any] struct{ F P }
+ //
+ // // package b
+ // type B a.A[int]
+ //
+ // We need to update references algorithms not to depend on this
+ // de-duplication, at which point we may want to simply remove the
+ // workaround here.
+ w.string("")
+ return
+ }
+ w.string(string(objectPath))
+ w.pkg(obj.Pkg())
+}
+
+func (w *exportWriter) signature(sig *types.Signature) {
+ w.paramList(sig.Params())
+ w.paramList(sig.Results())
+ if sig.Params().Len() > 0 {
+ w.bool(sig.Variadic())
+ }
+}
+
+func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) {
+ w.uint64(uint64(ts.Len()))
+ for i := 0; i < ts.Len(); i++ {
+ w.typ(ts.At(i), pkg)
+ }
+}
+
+func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) {
+ ll := uint64(list.Len())
+ w.uint64(ll)
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ // Set the type parameter exportName before exporting its type.
+ exportName := tparamExportName(prefix, tparam)
+ w.p.tparamNames[tparam.Obj()] = exportName
+ w.typ(list.At(i), pkg)
+ }
+}
+
+const blankMarker = "$"
+
+// tparamExportName returns the 'exported' name of a type parameter, which
+// differs from its actual object name: it is prefixed with a qualifier, and
+// blank type parameter names are disambiguated by their index in the type
+// parameter list.
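+//
+// For example, the type parameters of a hypothetical declaration
+// "func F[T any, _ any]()" are exported as "F.T" and "F.$1".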
+func tparamExportName(prefix string, tparam *types.TypeParam) string {
+ assert(prefix != "")
+ name := tparam.Obj().Name()
+ if name == "_" {
+ name = blankMarker + strconv.Itoa(tparam.Index())
+ }
+ return prefix + "." + name
+}
+
+// tparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See tparamExportName
+// for details.
+func tparamName(exportName string) string {
+ // Remove the "path" from the type param name that makes it unique.
+ ix := strings.LastIndex(exportName, ".")
+ if ix < 0 {
+ errorf("malformed type parameter export name %s: missing prefix", exportName)
+ }
+ name := exportName[ix+1:]
+ if strings.HasPrefix(name, blankMarker) {
+ return "_"
+ }
+ return name
+}
+
+func (w *exportWriter) paramList(tup *types.Tuple) {
+ n := tup.Len()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ w.param(tup.At(i))
+ }
+}
+
+func (w *exportWriter) param(obj types.Object) {
+ w.pos(obj.Pos())
+ w.localIdent(obj)
+ w.typ(obj.Type(), obj.Pkg())
+}
+
+func (w *exportWriter) value(typ types.Type, v constant.Value) {
+ w.typ(typ, nil)
+ if w.p.version >= iexportVersionGo1_18 {
+ w.int64(int64(v.Kind()))
+ }
+
+ if v.Kind() == constant.Unknown {
+ // golang/go#60605: treat unknown constant values as if they have invalid type
+ //
+ // This loses some fidelity over the package type-checked from source, but that
+ // is acceptable.
+ //
+ // TODO(rfindley): we should switch on the recorded constant kind rather
+ // than the constant type
+ return
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ w.bool(constant.BoolVal(v))
+ case types.IsInteger:
+ var i big.Int
+ if i64, exact := constant.Int64Val(v); exact {
+ i.SetInt64(i64)
+ } else if ui64, exact := constant.Uint64Val(v); exact {
+ i.SetUint64(ui64)
+ } else {
+ i.SetString(v.ExactString(), 10)
+ }
+ w.mpint(&i, typ)
+ case types.IsFloat:
+ f := constantToFloat(v)
+ w.mpfloat(f, typ)
+ case types.IsComplex:
+ w.mpfloat(constantToFloat(constant.Real(v)), typ)
+ w.mpfloat(constantToFloat(constant.Imag(v)), typ)
+ case types.IsString:
+ w.string(constant.StringVal(v))
+ default:
+ if b.Kind() == types.Invalid {
+ // package contains type errors
+ break
+ }
+ panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
+ }
+}
+
+// constantToFloat converts a constant.Value with kind constant.Float to a
+// big.Float.
+func constantToFloat(x constant.Value) *big.Float {
+ x = constant.ToFloat(x)
+ // Use the same floating-point precision (512) as cmd/compile
+ // (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+ const mpprec = 512
+ var f big.Float
+ f.SetPrec(mpprec)
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ n := valueToRat(num)
+ d := valueToRat(denom)
+ f.SetRat(n.Quo(n, d))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ _, ok := f.SetString(x.ExactString())
+ assert(ok)
+ }
+ return &f
+}
+
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2; while bytes 255, 254, and 253 indicate that a 1-,
+// 2-, or 3-byte big-endian string follows, respectively.
+//
+// The encoding for signed types uses the same general approach as for
+// unsigned types, except small values use zig-zag encoding and the
+// bottom bit of length prefix byte for large values is reserved as a
+// sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
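+// For illustration, with a value of type uint64 (maxBytes 8, so small
+// values are 0..247): 42 is written as the single byte 42, while 1000
+// (big-endian bytes 0x03 0xE8) is written as the prefix byte 254 (256-2)
+// followed by 0x03 0xE8. These numbers are illustrative, not taken from
+// real export data.
+//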
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
+ basic, ok := typ.Underlying().(*types.Basic)
+ if !ok {
+ panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
+ }
+
+ signed, maxBytes := intSize(basic)
+
+ negative := x.Sign() < 0
+ if !signed && negative {
+ panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
+ }
+
+ b := x.Bytes()
+ if len(b) > 0 && b[0] == 0 {
+ panic(internalErrorf("leading zeros"))
+ }
+ if uint(len(b)) > maxBytes {
+ panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
+ }
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ // Check if x can use small value encoding.
+ if len(b) <= 1 {
+ var ux uint
+ if len(b) == 1 {
+ ux = uint(b[0])
+ }
+ if signed {
+ ux <<= 1
+ if negative {
+ ux--
+ }
+ }
+ if ux < maxSmall {
+ w.data.WriteByte(byte(ux))
+ return
+ }
+ }
+
+ n := 256 - uint(len(b))
+ if signed {
+ n = 256 - 2*uint(len(b))
+ if negative {
+ n |= 1
+ }
+ }
+ if n < maxSmall || n >= 256 {
+ panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
+ }
+
+ w.data.WriteByte(byte(n))
+ w.data.Write(b)
+}
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
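+//
+// For example, 6.25 is written as mantissa 25 followed by exponent -2
+// (25 × 2**-2 = 6.25).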
+func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
+ if f.IsInf() {
+ panic("infinite constant")
+ }
+
+ // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+ var mant big.Float
+ exp := int64(f.MantExp(&mant))
+
+ // Scale so that mant is an integer.
+ prec := mant.MinPrec()
+ mant.SetMantExp(&mant, int(prec))
+ exp -= int64(prec)
+
+ manti, acc := mant.Int(nil)
+ if acc != big.Exact {
+ panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
+ }
+ w.mpint(manti, typ)
+ if manti.Sign() != 0 {
+ w.int64(exp)
+ }
+}
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
+func (w *exportWriter) int64(x int64) { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+ // Anonymous parameters.
+ if obj == nil {
+ w.string("")
+ return
+ }
+
+ name := obj.Name()
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ w.string(name)
+}
+
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func assert(cond bool) {
+ if !cond {
+ panic("internal error: assertion failed")
+ }
+}
+
+// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
+
+// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
+// a ready-to-use empty queue.
+type objQueue struct {
+ ring []types.Object
+ head, tail int
+}
+
+// empty returns true if q contains no Nodes.
+func (q *objQueue) empty() bool {
+ return q.head == q.tail
+}
+
+// pushTail appends n to the tail of the queue.
+func (q *objQueue) pushTail(obj types.Object) {
+ if len(q.ring) == 0 {
+ q.ring = make([]types.Object, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]types.Object, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = obj
+ q.tail++
+}
+
+// popHead pops a node from the head of the queue. It panics if q is empty.
+func (q *objQueue) popHead() types.Object {
+ if q.empty() {
+ panic("dequeue empty")
+ }
+ obj := q.ring[q.head%len(q.ring)]
+ q.head++
+ return obj
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+// TODO(adonovan): make this call panic, so that it's symmetric with errorf.
+// Otherwise it's easy to forget to do anything with the error.
+//
+// TODO(adonovan): also, consider switching the names "errorf" and
+// "internalErrorf" as the former is used for bugs, whose cause is
+// internal inconsistency, whereas the latter is used for ordinary
+// situations like bad input, whose cause is external.
+func internalErrorf(format string, args ...interface{}) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
new file mode 100644
index 0000000000..9bde15e3bc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -0,0 +1,1082 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/gc/iexport.go for the export data format.
+
+// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+)
+
+type intReader struct {
+ *bytes.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGo1_18 = 2
+ iexportVersionGenerics = 2
+
+ iexportVersionCurrent = 2
+)
+
+type ident struct {
+ pkg *types.Package
+ name string
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+ typeParamType
+ instanceType
+ unionType
+)
+
+// IImportData imports a package from the serialized package data
+// and returns 0 and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
+ pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)
+ if err != nil {
+ return 0, nil, err
+ }
+ return 0, pkgs[0], nil
+}
+
+// IImportBundle imports a set of packages from the serialized package bundle.
+func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
+ return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)
+}
+
+// A GetPackagesFunc function obtains the non-nil symbols for a set of
+// packages, creating and recursively importing them as needed. An
+// implementation should store each package symbol in the Pkg
+// field of the items array.
+//
+// Any error causes importing to fail. This can be used to quickly read
+// the import manifest of an export data file without fully decoding it.
+type GetPackagesFunc = func(items []GetPackagesItem) error
+
+// A GetPackagesItem is a request from the importer for the package
+// symbol of the specified name and path.
+type GetPackagesItem struct {
+ Name, Path string
+ Pkg *types.Package // to be filled in by GetPackagesFunc call
+
+ // private importer state
+ pathOffset uint64
+ nameIndex map[string]uint64
+}
+
+// GetPackagesFromMap returns a GetPackagesFunc that retrieves
+// packages from the given map of package path to package.
+//
+// The returned function may mutate m: each requested package that is not
+// found is created with types.NewPackage and inserted into m.
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc {
+ return func(items []GetPackagesItem) error {
+ for i, item := range items {
+ pkg, ok := m[item.Path]
+ if !ok {
+ pkg = types.NewPackage(item.Path, item.Name)
+ m[item.Path] = pkg
+ }
+ items[i].Pkg = pkg
+ }
+ return nil
+ }
+}
+
+func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) {
+ const currentVersion = iexportVersionCurrent
+ version := int64(-1)
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if bundle {
+ err = fmt.Errorf("%v", e)
+ } else if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
+ }
+ }
+ }()
+ }
+
+ r := &intReader{bytes.NewReader(data), path}
+
+ if bundle {
+ if v := r.uint64(); v != bundleVersion {
+ errorf("unknown bundle format version %d", v)
+ }
+ }
+
+ version = int64(r.uint64())
+ switch version {
+ case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ if version > iexportVersionGo1_18 {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
+ }
+
+ sLen := int64(r.uint64())
+ var fLen int64
+ var fileOffset []uint64
+ if shallow {
+ // Shallow mode uses a different position encoding.
+ fLen = int64(r.uint64())
+ fileOffset = make([]uint64, r.uint64())
+ for i := range fileOffset {
+ fileOffset[i] = r.uint64()
+ }
+ }
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io.SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ fileData := data[whence+sLen : whence+sLen+fLen]
+ declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
+ r.Seek(sLen+fLen+dLen, io.SeekCurrent)
+
+ p := iimporter{
+ version: int(version),
+ ipath: path,
+ shallow: shallow,
+ reportf: reportf,
+
+ stringData: stringData,
+ stringCache: make(map[uint64]string),
+ fileOffset: fileOffset,
+ fileData: fileData,
+ fileCache: make([]*token.File, len(fileOffset)),
+ pkgCache: make(map[uint64]*types.Package),
+
+ declData: declData,
+ pkgIndex: make(map[*types.Package]map[string]uint64),
+ typCache: make(map[uint64]types.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name.
+ tparamIndex: make(map[ident]types.Type),
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+ }
+ defer p.fake.setLines() // set lines for files in fset
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ // Gather the relevant packages from the manifest.
+ items := make([]GetPackagesItem, r.uint64())
+ for i := range items {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ _ = r.uint64() // package height; unused by go/types
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ items[i].Name = pkgName
+ items[i].Path = pkgPath
+ items[i].pathOffset = pkgPathOff
+
+ // Read index for package.
+ nameIndex := make(map[string]uint64)
+ nSyms := r.uint64()
+ // In shallow mode, only the current package (i=0) has an index.
+ assert(!(shallow && i > 0 && nSyms != 0))
+ for ; nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ items[i].nameIndex = nameIndex
+ }
+
+ // Request packages all at once from the client,
+ // enabling a parallel implementation.
+ if err := getPackages(items); err != nil {
+ return nil, err // don't wrap this error
+ }
+
+ // Check the results and complete the index.
+ pkgList := make([]*types.Package, len(items))
+ for i, item := range items {
+ pkg := item.Pkg
+ if pkg == nil {
+ errorf("internal error: getPackages returned nil package for %q", item.Path)
+ } else if pkg.Path() != item.Path {
+ errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path)
+ } else if pkg.Name() != item.Name {
+ errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name)
+ }
+ p.pkgCache[item.pathOffset] = pkg
+ p.pkgIndex[pkg] = item.nameIndex
+ pkgList[i] = pkg
+ }
+
+ if bundle {
+ pkgs = make([]*types.Package, r.uint64())
+ for i := range pkgs {
+ pkg := p.pkgAt(r.uint64())
+ imps := make([]*types.Package, r.uint64())
+ for j := range imps {
+ imps[j] = p.pkgAt(r.uint64())
+ }
+ pkg.SetImports(imps)
+ pkgs[i] = pkg
+ }
+ } else {
+ if len(pkgList) == 0 {
+ errorf("no packages found for %s", path)
+ panic("unreachable")
+ }
+ pkgs = pkgList[:1]
+
+ // record all referenced packages as imports
+ list := append(([]*types.Package)(nil), pkgList[1:]...)
+ sort.Sort(byPath(list))
+ pkgs[0].SetImports(list)
+ }
+
+ for _, pkg := range pkgs {
+ if pkg.Complete() {
+ continue
+ }
+
+ names := make([]string, 0, len(p.pkgIndex[pkg]))
+ for name := range p.pkgIndex[pkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(pkg, name)
+ }
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+ }
+
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the 'P' case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ d.t.SetConstraint(d.constraint)
+ }
+
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ // Workaround for golang/go#61561. See the doc for instanceList for details.
+ for _, typ := range p.instanceList {
+ if iface, _ := typ.Underlying().(*types.Interface); iface != nil {
+ iface.Complete()
+ }
+ }
+
+ return pkgs, nil
+}
+
+type setConstraintArgs struct {
+ t *types.TypeParam
+ constraint types.Type
+}
+
+type iimporter struct {
+ version int
+ ipath string
+
+ shallow bool
+ reportf ReportFunc // if non-nil, used to report bugs
+
+ stringData []byte
+ stringCache map[uint64]string
+ fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
+ fileData []byte
+ fileCache []*token.File // memoized decoding of file encoded as i
+ pkgCache map[uint64]*types.Package
+
+ declData []byte
+ pkgIndex map[*types.Package]map[string]uint64
+ typCache map[uint64]types.Type
+ tparamIndex map[ident]types.Type
+
+ fake fakeFileSet
+ interfaceList []*types.Interface
+
+ // Workaround for the go/types bug golang/go#61561: instances produced during
+ // instantiation may contain incomplete interfaces. Here we only complete the
+ // underlying type of the instance, which is the most common case but doesn't
+ // handle parameterized interface literals defined deeper in the type.
+ instanceList []types.Type // instances for later completion (see golang/go#61561)
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
+
+ indent int // for tracing support
+}
+
+func (p *iimporter) trace(format string, args ...interface{}) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+ if debug {
+ p.trace("import decl %s", name)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", name)
+ }()
+ }
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ // In deep mode, the index should be complete. In shallow
+ // mode, we should have already recursively loaded necessary
+ // dependencies so the above Lookup succeeds.
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ if s, ok := p.stringCache[off]; ok {
+ return s
+ }
+
+ slen, n := binary.Uvarint(p.stringData[off:])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ s := string(p.stringData[spos : spos+slen])
+ p.stringCache[off] = s
+ return s
+}
+
+func (p *iimporter) fileAt(index uint64) *token.File {
+ file := p.fileCache[index]
+ if file == nil {
+ off := p.fileOffset[index]
+ file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
+ p.fileCache[index] = file
+ }
+ return file
+}
+
+func (p *iimporter) decodeFile(rd intReader) *token.File {
+ filename := p.stringAt(rd.uint64())
+ size := int(rd.uint64())
+ file := p.fake.fset.AddFile(filename, -1, size)
+
+ // SetLines requires a nondecreasing sequence.
+ // Because it is common for clients to derive the interval
+ // [start, start+len(name)] from a start position, and we
+ // want to ensure that the end offset is on the same line,
+ // we fill in the gaps of the sparse encoding with values
+ // that strictly increase by the largest possible amount.
+ // This allows us to avoid having to record the actual end
+ // offset of each needed line.
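+ //
+ // For illustration (made-up numbers): with 5 lines, a file size of 100,
+ // and sparse points (0, 0) and (3, 40), the decoded table becomes
+ // [0, 38, 39, 40, 99]: gaps before a recorded point count down from it,
+ // and trailing lines count down from the end of the file.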
+
+ lines := make([]int, int(rd.uint64()))
+ var index, offset int
+ for i, n := 0, int(rd.uint64()); i < n; i++ {
+ index += int(rd.uint64())
+ offset += int(rd.uint64())
+ lines[index] = offset
+
+ // Ensure monotonicity between points.
+ for j := index - 1; j > 0 && lines[j] == 0; j-- {
+ lines[j] = lines[j+1] - 1
+ }
+ }
+
+ // Ensure monotonicity after last point.
+ for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
+ size--
+ lines[j] = size
+ }
+
+ if !file.SetLines(lines) {
+ errorf("SetLines failed: %d", lines) // can't happen
+ }
+ return file
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
+ if t, ok := p.typCache[off]; ok && canReuse(base, t) {
+ return t
+ }
+
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ r.declReader.Reset(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if canReuse(base, t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types.Named, rhs types.Type) bool {
+ if def == nil {
+ return true
+ }
+ iface, _ := rhs.(*types.Interface)
+ if iface == nil {
+ return true
+ }
+ // Don't use iface.Empty() here as iface may not be complete.
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
+type importReader struct {
+ p *iimporter
+ declReader bytes.Reader
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case 'A':
+ typ := r.typ()
+
+ r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
+
+ case 'C':
+ typ, val := r.value()
+
+ r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+ case 'F', 'G':
+ var tparams []*types.TypeParam
+ if tag == 'G' {
+ tparams = r.tparamList()
+ }
+ sig := r.signature(nil, nil, tparams)
+ r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+ case 'T', 'U':
+ // Types can be recursive. We need to set up a stub
+ // declaration before recursing.
+ obj := types.NewTypeName(pos, r.currPkg, name, nil)
+ named := types.NewNamed(obj, nil, nil)
+ // Declare obj before calling r.tparamList, so the new type name is recognized
+ // if used in the constraint of one of its own typeparams (see #48280).
+ r.declare(obj)
+ if tag == 'U' {
+ tparams := r.tparamList()
+ named.SetTypeParams(tparams)
+ }
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ base := baseType(recv.Type())
+ assert(base != nil)
+ targs := base.TypeArgs()
+ var rparams []*types.TypeParam
+ if targs.Len() > 0 {
+ rparams = make([]*types.TypeParam, targs.Len())
+ for i := range rparams {
+ rparams[i] = targs.At(i).(*types.TypeParam)
+ }
+ }
+ msig := r.signature(recv, rparams, nil)
+
+ named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case 'P':
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ name0 := tparamName(name)
+ tn := types.NewTypeName(pos, r.currPkg, name0, nil)
+ t := types.NewTypeParam(tn, nil)
+
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg, name}
+ r.p.tparamIndex[id] = t
+ var implicit bool
+ if r.p.version >= iexportVersionGo1_18 {
+ implicit = r.bool()
+ }
+ constraint := r.typ()
+ if implicit {
+ iface, _ := constraint.(*types.Interface)
+ if iface == nil {
+ errorf("non-interface constraint marked implicit")
+ }
+ iface.MarkImplicit()
+ }
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
+ case 'V':
+ typ := r.typ()
+
+ r.declare(types.NewVar(pos, r.currPkg, name, typ))
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+ typ = r.typ()
+ if r.p.version >= iexportVersionGo1_18 {
+ // TODO: add support for using the kind.
+ _ = constant.Kind(r.int64())
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types.IsString:
+ val = constant.MakeString(r.string())
+
+ case types.IsInteger:
+ var x big.Int
+ r.mpint(&x, b)
+ val = constant.Make(&x)
+
+ case types.IsFloat:
+ val = r.mpfloat(b)
+
+ case types.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ if b.Kind() == types.Invalid {
+ val = constant.MakeUnknown()
+ return
+ }
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types.Float32, types.Complex64:
+ return true, 3
+ case types.Float64, types.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types.IsUnsigned) == 0
+ switch b.Kind() {
+ case types.Int8, types.Uint8:
+ maxBytes = 1
+ case types.Int16, types.Uint16:
+ maxBytes = 2
+ case types.Int32, types.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(x *big.Int, typ *types.Basic) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ io.ReadFull(&r.declReader, b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
+
+func (r *importReader) mpfloat(typ *types.Basic) constant.Value {
+ var mant big.Int
+ r.mpint(&mant, typ)
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(r.int64()))
+ }
+ return constant.Make(&f)
+}
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() token.Pos {
+ if r.p.shallow {
+ // precise offsets are encoded only in shallow mode
+ return r.posv2()
+ }
+ if r.p.version >= iexportVersionPosCol {
+ r.posv1()
+ } else {
+ r.posv0()
+ }
+
+ if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
+ return token.NoPos
+ }
+ return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
+}
+
+func (r *importReader) posv0() {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevFile = r.string()
+ r.prevLine = l
+ }
+}
+
+func (r *importReader) posv1() {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevFile = r.string()
+ }
+ }
+}
+
+func (r *importReader) posv2() token.Pos {
+ file := r.uint64()
+ if file == 0 {
+ return token.NoPos
+ }
+ tf := r.p.fileAt(file - 1)
+ return tf.Pos(int(r.uint64()))
+}
+
+func (r *importReader) typ() types.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+ _, ok := t.(*types.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+
+func (r *importReader) doType(base *types.Named) (res types.Type) {
+ k := r.kind()
+ if debug {
+ r.p.trace("importing type %d (base: %s)", k, base)
+ r.p.indent++
+ defer func() {
+ r.p.indent--
+ r.p.trace("=> %s", res)
+ }()
+ }
+ switch k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types.TypeName).Type()
+ case pointerType:
+ return types.NewPointer(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types.NewChan(dir, r.typ())
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil, nil, nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ var field *types.Var
+ if r.p.shallow {
+ field, _ = r.objectPathObject().(*types.Var)
+ }
+
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ // Either this is not a shallow import, the field is local, or the
+ // encoded objectPath failed to produce an object (a bug).
+ //
+ // Even in this last, buggy case, fall back on creating a new field. As
+ // discussed in iexport.go, this is not correct, but mostly works and is
+ // preferable to failing (for now at least).
+ if field == nil {
+ field = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ }
+
+ fields[i] = field
+ tags[i] = tag
+ }
+ return types.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types.Func, r.uint64())
+ for i := range methods {
+ var method *types.Func
+ if r.p.shallow {
+ method, _ = r.objectPathObject().(*types.Func)
+ }
+
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types.Var
+ if base != nil {
+ recv = types.NewVar(token.NoPos, r.currPkg, "", base)
+ }
+ msig := r.signature(recv, nil, nil)
+
+ if method == nil {
+ method = types.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+ methods[i] = method
+ }
+
+ typ := newInterface(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+
+ case typeParamType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg, name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instanceType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ // pos does not matter for instances: they are positioned on the original
+ // type.
+ _ = r.pos()
+ len := r.uint64()
+ targs := make([]types.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ // TODO provide a non-nil *Environment
+ t, _ := types.Instantiate(nil, baseType, targs, false)
+
+ // Workaround for golang/go#61561. See the doc for instanceList for details.
+ r.p.instanceList = append(r.p.instanceList, t)
+ return t
+
+ case unionType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ terms := make([]*types.Term, r.uint64())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+// objectPathObject is the inverse of exportWriter.objectPath.
+//
+// In shallow mode, certain fields and methods may need to be looked up in an
+// imported package. See the doc for exportWriter.objectPath for a full
+// explanation.
+func (r *importReader) objectPathObject() types.Object {
+ objPath := objectpath.Path(r.string())
+ if objPath == "" {
+ return nil
+ }
+ pkg := r.pkg()
+ obj, err := objectpath.Object(pkg, objPath)
+ if err != nil {
+ if r.p.reportf != nil {
+ r.p.reportf("failed to find object for objectPath %q: %v", objPath, err)
+ }
+ }
+ return obj
+}
+
+func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return types.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*types.TypeParam {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types.TypeParam, n)
+ for i := range xs {
+ // Note: the standard library importer is tolerant of nil types here,
+ // though would panic in SetTypeParams.
+ xs[i] = r.typ().(*types.TypeParam)
+ }
+ return xs
+}
+
+func (r *importReader) paramList() *types.Tuple {
+ xs := make([]*types.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+
+func baseType(typ types.Type) *types.Named {
+ // pointer receivers are never types.Named types
+ if p, _ := typ.(*types.Pointer); p != nil {
+ typ = p.Elem()
+ }
+ // receiver base types are always (possibly generic) types.Named types
+ n, _ := typ.(*types.Named)
+ return n
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
new file mode 100644
index 0000000000..8b163e3d05
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.11
+// +build !go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ named := make([]*types.Named, len(embeddeds))
+ for i, e := range embeddeds {
+ var ok bool
+ named[i], ok = e.(*types.Named)
+ if !ok {
+ panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
+ }
+ }
+ return types.NewInterface(methods, named)
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
new file mode 100644
index 0000000000..49984f40fd
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.11
+// +build go1.11
+
+package gcimporter
+
+import "go/types"
+
+func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
+ return types.NewInterfaceType(methods, embeddeds)
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go
new file mode 100644
index 0000000000..d892273efb
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package gcimporter
+
+import "go/types"
+
+const iexportVersion = iexportVersionGo1_11
+
+func additionalPredeclared() []types.Type {
+ return nil
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
new file mode 100644
index 0000000000..edbe6ea704
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package gcimporter
+
+import "go/types"
+
+const iexportVersion = iexportVersionGenerics
+
+// additionalPredeclared returns additional predeclared types in go.1.18.
+func additionalPredeclared() []types.Type {
+ return []types.Type{
+ // comparable
+ types.Universe.Lookup("comparable").Type(),
+
+ // any
+ types.Universe.Lookup("any").Type(),
+ }
+}
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
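+// For example, splitVargenSuffix("x·3") returns ("x", "·3").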
+func splitVargenSuffix(name string) (base, suffix string) {
+ i := len(name)
+ for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && name[i-len(dot):i] == dot {
+ i -= len(dot)
+ return name[:i], name[i:]
+ }
+ return name, ""
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
new file mode 100644
index 0000000000..286bf44548
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !(go1.18 && goexperiment.unified)
+// +build !go1.18 !goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = false
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
new file mode 100644
index 0000000000..b5d69ffbe6
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18 && goexperiment.unified
+// +build go1.18,goexperiment.unified
+
+package gcimporter
+
+const unifiedIR = true
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go
new file mode 100644
index 0000000000..8eb20729c2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+)
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data")
+ return
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
new file mode 100644
index 0000000000..b977435f62
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -0,0 +1,728 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Derived from go/internal/gcimporter/ureader.go
+
+//go:build go1.18
+// +build go1.18
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/internal/pkgbits"
+)
+
+// A pkgReader holds the shared state for reading a unified IR package
+// description.
+type pkgReader struct {
+ pkgbits.PkgDecoder
+
+ fake fakeFileSet
+
+ ctxt *types.Context
+ imports map[string]*types.Package // previously imported packages, indexed by path
+
+ // lazily initialized arrays corresponding to the unified IR
+ // PosBase, Pkg, and Type sections, respectively.
+ posBases []string // position bases (i.e., file names)
+ pkgs []*types.Package
+ typs []types.Type
+
+ // laterFns holds functions that need to be invoked at the end of
+ // import reading.
+ laterFns []func()
+ // laterFors is used in case of 'type A B' to ensure that B is processed before A.
+ laterFors map[types.Type]int
+
+ // ifaces holds a list of constructed Interfaces, which need to have
+ // Complete called after importing is done.
+ ifaces []*types.Interface
+}
+
+// later adds a function to be invoked at the end of import reading.
+func (pr *pkgReader) later(fn func()) {
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+ idx pkgbits.Index
+ needed bool
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+ idx pkgbits.Index
+ derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ if !debug {
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
+ }
+ }()
+ }
+
+ s := string(data)
+ s = s[:strings.LastIndex(s, "\n$$\n")]
+ input := pkgbits.NewPkgDecoder(path, s)
+ pkg = readUnifiedPackage(fset, nil, imports, input)
+ return
+}
+
+// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
+func (pr *pkgReader) laterFor(t types.Type, fn func()) {
+ if pr.laterFors == nil {
+ pr.laterFors = make(map[types.Type]int)
+ }
+ pr.laterFors[t] = len(pr.laterFns)
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+ pr := pkgReader{
+ PkgDecoder: input,
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+
+ ctxt: ctxt,
+ imports: imports,
+
+ posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+ typs: make([]types.Type, input.NumElems(pkgbits.RelocType)),
+ }
+ defer pr.fake.setLines()
+
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+ pkg := r.pkg()
+ r.Bool() // has init
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ // As if r.obj(), but avoiding the Scope.Lookup call,
+ // to avoid eager loading of imports.
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ assert(r.Len() == 0)
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+
+ for _, fn := range pr.laterFns {
+ fn()
+ }
+
+ for _, iface := range pr.ifaces {
+ iface.Complete()
+ }
+
+ // Imports() of pkg are all of the transitive packages that were loaded.
+ var imps []*types.Package
+ for _, imp := range pr.pkgs {
+ if imp != nil && imp != pkg {
+ imps = append(imps, imp)
+ }
+ }
+ sort.Sort(byPath(imps))
+ pkg.SetImports(imps)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+ pkgbits.Decoder
+
+ p *pkgReader
+
+ dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+ // bounds is a slice of typeInfos corresponding to the underlying
+ // bounds of the element's type parameters.
+ bounds []typeInfo
+
+ // tparams is a slice of the constructed TypeParams for the element.
+ tparams []*types.TypeParam
+
+ // derived is a slice of types derived from tparams, which may be
+ // instantiated while reading the current element.
+ derived []derivedInfo
+ derivedTypes []types.Type // lazily instantiated from derived
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.NewDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.TempDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) retireReader(r *reader) {
+ pr.RetireDecoder(&r.Decoder)
+}
+
+// @@@ Positions
+
+func (r *reader) pos() token.Pos {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
+ return token.NoPos
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.Uint()
+ col := r.Uint()
+ return r.p.fake.pos(posBase, int(line), int(col))
+}
+
+func (r *reader) posBase() string {
+ return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
+}
+
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
+ if b := pr.posBases[idx]; b != "" {
+ return b
+ }
+
+ var filename string
+ {
+ r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+
+ // Within types2, position bases have a lot more details (e.g.,
+ // keeping track of where //line directives appeared exactly).
+ //
+ // For go/types, we just track the file name.
+
+ filename = r.String()
+
+ if r.Bool() { // file base
+ // Was: "b = token.NewTrimmedFileBase(filename, true)"
+ } else { // line base
+ pos := r.pos()
+ line := r.Uint()
+ col := r.Uint()
+
+ // Was: "b = token.NewLineBase(pos, filename, true, line, col)"
+ _, _, _ = pos, line, col
+ }
+ pr.retireReader(r)
+ }
+ b := filename
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Package {
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types.Package {
+ path := r.String()
+ switch path {
+ case "":
+ path = r.p.PkgPath()
+ case "builtin":
+ return nil // universe
+ case "unsafe":
+ return types.Unsafe
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.String()
+
+ pkg := types.NewPackage(path, name)
+ r.p.imports[path] = pkg
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() types.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
+ }
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
+}
+
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
+ idx := info.idx
+ var where *types.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ var typ types.Type
+ {
+ r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
+ r.dict = dict
+
+ typ = r.doTyp()
+ assert(typ != nil)
+ pr.retireReader(r)
+ }
+ // See comment in pkgReader.typIdx explaining how this happens.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader) doTyp() (res types.Type) {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
+ default:
+ errorf("unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case pkgbits.TypeBasic:
+ return types.Typ[r.Len()]
+
+ case pkgbits.TypeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types.TypeName)
+ if len(targs) != 0 {
+ t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false)
+ return t
+ }
+ return name.Type()
+
+ case pkgbits.TypeTypeParam:
+ return r.dict.tparams[r.Len()]
+
+ case pkgbits.TypeArray:
+ len := int64(r.Uint64())
+ return types.NewArray(r.typ(), len)
+ case pkgbits.TypeChan:
+ dir := types.ChanDir(r.Len())
+ return types.NewChan(dir, r.typ())
+ case pkgbits.TypeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case pkgbits.TypePointer:
+ return types.NewPointer(r.typ())
+ case pkgbits.TypeSignature:
+ return r.signature(nil, nil, nil)
+ case pkgbits.TypeSlice:
+ return types.NewSlice(r.typ())
+ case pkgbits.TypeStruct:
+ return r.structType()
+ case pkgbits.TypeInterface:
+ return r.interfaceType()
+ case pkgbits.TypeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader) structType() *types.Struct {
+ fields := make([]*types.Var, r.Len())
+ var tags []string
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.String()
+ embedded := r.Bool()
+
+ fields[i] = types.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types.NewStruct(fields, tags)
+}
+
+func (r *reader) unionType() *types.Union {
+ terms := make([]*types.Term, r.Len())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.Bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+}
+
+func (r *reader) interfaceType() *types.Interface {
+ methods := make([]*types.Func, r.Len())
+ embeddeds := make([]types.Type, r.Len())
+ implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil, nil, nil)
+ methods[i] = types.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ iface := types.NewInterfaceType(methods, embeddeds)
+ if implicit {
+ iface.MarkImplicit()
+ }
+
+ // We need to call iface.Complete(), but if there are any embedded
+ // defined types, then we may not have set their underlying
+ // interface type yet. So we need to defer calling Complete until
+ // after we've called SetUnderlying everywhere.
+ //
+ // TODO(mdempsky): After CL 424876 lands, it should be safe to call
+ // iface.Complete() immediately.
+ r.p.ifaces = append(r.p.ifaces, iface)
+
+ return iface
+}
+
+func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature {
+ r.Sync(pkgbits.SyncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.Bool()
+
+ return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
+}
+
+func (r *reader) params() *types.Tuple {
+ r.Sync(pkgbits.SyncParams)
+
+ params := make([]*types.Var, r.Len())
+ for i := range params {
+ params[i] = r.param()
+ }
+
+ return types.NewTuple(params...)
+}
+
+func (r *reader) param() *types.Var {
+ r.Sync(pkgbits.SyncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types.Object, []types.Type) {
+ r.Sync(pkgbits.SyncObject)
+
+ assert(!r.Bool())
+
+ pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ obj := pkgScope(pkg).Lookup(name)
+
+ targs := make([]types.Type, r.Len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
+
+ var objPkg *types.Package
+ var objName string
+ var tag pkgbits.CodeObj
+ {
+ rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+ objPkg, objName = rname.qualifiedIdent()
+ assert(objName != "")
+
+ tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+ pr.retireReader(rname)
+ }
+
+ if tag == pkgbits.ObjStub {
+ assert(objPkg == nil || objPkg == types.Unsafe)
+ return objPkg, objName
+ }
+
+ // Ignore local types promoted to global scope (#55110).
+ if _, suffix := splitVargenSuffix(objName); suffix != "" {
+ return objPkg, objName
+ }
+
+ if objPkg.Scope().Lookup(objName) == nil {
+ dict := pr.objDictIdx(idx)
+
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ r.dict = dict
+
+ declare := func(obj types.Object) {
+ objPkg.Scope().Insert(obj)
+ }
+
+ switch tag {
+ default:
+ panic("weird")
+
+ case pkgbits.ObjAlias:
+ pos := r.pos()
+ typ := r.typ()
+ declare(types.NewTypeName(pos, objPkg, objName, typ))
+
+ case pkgbits.ObjConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := r.Value()
+ declare(types.NewConst(pos, objPkg, objName, typ, val))
+
+ case pkgbits.ObjFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil, nil, tparams)
+ declare(types.NewFunc(pos, objPkg, objName, sig))
+
+ case pkgbits.ObjType:
+ pos := r.pos()
+
+ obj := types.NewTypeName(pos, objPkg, objName, nil)
+ named := types.NewNamed(obj, nil, nil)
+ declare(obj)
+
+ named.SetTypeParams(r.typeParamNames())
+
+ setUnderlying := func(underlying types.Type) {
+ // If the underlying type is an interface, we need to
+ // duplicate its methods so we can replace the receiver
+ // parameter's type (#49906).
+ if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+ methods := make([]*types.Func, iface.NumExplicitMethods())
+ for i := range methods {
+ fn := iface.ExplicitMethod(i)
+ sig := fn.Type().(*types.Signature)
+
+ recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+ methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
+ }
+
+ embeds := make([]types.Type, iface.NumEmbeddeds())
+ for i := range embeds {
+ embeds[i] = iface.EmbeddedType(i)
+ }
+
+ newIface := types.NewInterfaceType(methods, embeds)
+ r.p.ifaces = append(r.p.ifaces, newIface)
+ underlying = newIface
+ }
+
+ named.SetUnderlying(underlying)
+ }
+
+ // Since go.dev/cl/455279, we can assume rhs.Underlying() will
+ // always be non-nil. However, to temporarily support users of
+ // older snapshot releases, we continue to fall back to the old
+ // behavior for now.
+ //
+ // TODO(mdempsky): Remove fallback code and simplify after
+ // allowing time for snapshot users to upgrade.
+ rhs := r.typ()
+ if underlying := rhs.Underlying(); underlying != nil {
+ setUnderlying(underlying)
+ } else {
+ pk := r.p
+ pk.laterFor(named, func() {
+ // First be sure that the rhs is initialized, if it needs to be initialized.
+ delete(pk.laterFors, named) // prevent cycles
+ if i, ok := pk.laterFors[rhs]; ok {
+ f := pk.laterFns[i]
+ pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+ f() // initialize RHS
+ }
+ setUnderlying(rhs.Underlying())
+ })
+ }
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ named.AddMethod(r.method())
+ }
+
+ case pkgbits.ObjVar:
+ pos := r.pos()
+ typ := r.typ()
+ declare(types.NewVar(pos, objPkg, objName, typ))
+ }
+ }
+
+ return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+
+ var dict readerDict
+
+ {
+ r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+ if implicits := r.Len(); implicits != 0 {
+ errorf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ dict.bounds = make([]typeInfo, r.Len())
+ for i := range dict.bounds {
+ dict.bounds[i] = r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.Len())
+ dict.derivedTypes = make([]types.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+ }
+
+ pr.retireReader(r)
+ }
+ // function references follow, but reader doesn't need those
+
+ return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+ r.Sync(pkgbits.SyncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+ // implicit type parameters. This is currently fine, because
+ // reader is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
+
+ r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ tname := types.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+ }
+
+ typs := make([]types.Type, len(r.dict.bounds))
+ for i, bound := range r.dict.bounds {
+ typs[i] = r.p.typIdx(bound, r.dict)
+ }
+
+ // TODO(mdempsky): This is subtle, elaborate further.
+ //
+ // We have to save tparams outside of the closure, because
+ // typeParamNames() can be called multiple times with the same
+ // dictionary instance.
+ //
+ // Also, this needs to happen later to make sure SetUnderlying has
+ // been called.
+ //
+ // TODO(mdempsky): Is it safe to have a single "later" slice or do
+ // we need to have multiple passes? See comments on CL 386002 and
+ // go.dev/issue/52104.
+ tparams := r.dict.tparams
+ r.p.later(func() {
+ for i, typ := range typs {
+ tparams[i].SetConstraint(typ)
+ }
+ })
+
+ return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+ r.Sync(pkgbits.SyncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rparams := r.typeParamNames()
+ sig := r.signature(r.param(), rparams, nil)
+
+ _ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+ return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+ r.Sync(marker)
+ return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+ if pkg != nil {
+ return pkg.Scope()
+ }
+ return types.Universe
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
new file mode 100644
index 0000000000..55312522dc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -0,0 +1,465 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gocommand is a helper for calling the go command.
+package gocommand
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+ "golang.org/x/tools/internal/event/tag"
+)
+
+// A Runner will run go command invocations and serialize
+// them if it sees a concurrency error.
+type Runner struct {
+ // once guards the runner initialization.
+ once sync.Once
+
+ // inFlight tracks available workers.
+ inFlight chan struct{}
+
+ // serialized guards the ability to run a go command serially,
+ // to avoid deadlocks when claiming workers.
+ serialized chan struct{}
+}
+
+const maxInFlight = 10
+
+func (runner *Runner) initialize() {
+ runner.once.Do(func() {
+ runner.inFlight = make(chan struct{}, maxInFlight)
+ runner.serialized = make(chan struct{}, 1)
+ })
+}
+
+// 1.13: go: updates to go.mod needed, but contents have changed
+// 1.14: go: updating go.mod: existing contents have changed since last read
+var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+
+// verb is an event label for the go command verb.
+var verb = keys.NewString("verb", "go command verb")
+
+func invLabels(inv Invocation) []label.Label {
+ return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)}
+}
+
+// Run is a convenience wrapper around RunRaw.
+// It returns only stdout and a "friendly" error.
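+//
+// Usage sketch (illustrative; assumes a context.Context named ctx):
+//
+// var runner Runner
+// stdout, err := runner.Run(ctx, Invocation{Verb: "env", Args: []string{"GOMOD"}})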
+func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+ defer done()
+
+ stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
+ return stdout, friendly
+}
+
+// RunPiped runs the invocation serially, always waiting for any concurrent
+// invocations to complete first.
+func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+ defer done()
+
+ _, err := runner.runPiped(ctx, inv, stdout, stderr)
+ return err
+}
+
+// RunRaw runs the invocation, serializing requests only if they fight over
+// go.mod changes.
+// Postcondition: both error results have same nilness.
+func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
+ defer done()
+ // Make sure the runner is always initialized.
+ runner.initialize()
+
+ // First, try to run the go command concurrently.
+ stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
+
+ // If we encounter a load concurrency error, we need to retry serially.
+ if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) {
+ event.Error(ctx, "Load concurrency error, will retry serially", err)
+
+ // Run serially by calling runPiped.
+ stdout.Reset()
+ stderr.Reset()
+ friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
+ }
+
+ return stdout, stderr, friendlyErr, err
+}
+
+// Postcondition: both error results have same nilness.
+func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+ // Wait for 1 worker to become available.
+ select {
+ case <-ctx.Done():
+ return nil, nil, ctx.Err(), ctx.Err()
+ case runner.inFlight <- struct{}{}:
+ defer func() { <-runner.inFlight }()
+ }
+
+ stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
+ friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
+ return stdout, stderr, friendlyErr, err
+}
+
+// Postcondition: both error results have same nilness.
+func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
+ // Make sure the runner is always initialized.
+ runner.initialize()
+
+ // Acquire the serialization lock. This avoids deadlocks between two
+ // runPiped commands.
+ select {
+ case <-ctx.Done():
+ return ctx.Err(), ctx.Err()
+ case runner.serialized <- struct{}{}:
+ defer func() { <-runner.serialized }()
+ }
+
+ // Wait for all in-progress go commands to return before proceeding,
+ // to avoid load concurrency errors.
+ for i := 0; i < maxInFlight; i++ {
+ select {
+ case <-ctx.Done():
+ return ctx.Err(), ctx.Err()
+ case runner.inFlight <- struct{}{}:
+ // Make sure we always "return" any workers we took.
+ defer func() { <-runner.inFlight }()
+ }
+ }
+
+ return inv.runWithFriendlyError(ctx, stdout, stderr)
+}
+
+// An Invocation represents a call to the go command.
+type Invocation struct {
+ Verb string
+ Args []string
+ BuildFlags []string
+
+ // If ModFlag is set, the go command is invoked with -mod=ModFlag.
+ ModFlag string
+
+ // If ModFile is set, the go command is invoked with -modfile=ModFile.
+ ModFile string
+
+ // If Overlay is set, the go command is invoked with -overlay=Overlay.
+ Overlay string
+
+ // If CleanEnv is set, the invocation will run only with the environment
+ // in Env, not starting with os.Environ.
+ CleanEnv bool
+ Env []string
+ WorkingDir string
+ Logf func(format string, args ...interface{})
+}
+
+// Postcondition: both error results have same nilness.
+func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
+ rawError = i.run(ctx, stdout, stderr)
+ if rawError != nil {
+ friendlyError = rawError
+ // Check for 'go' executable not being found.
+ if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+ friendlyError = fmt.Errorf("go command required, not found: %v", ee)
+ }
+ if ctx.Err() != nil {
+ friendlyError = ctx.Err()
+ }
+ friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
+ }
+ return
+}
+
+func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
+ log := i.Logf
+ if log == nil {
+ log = func(string, ...interface{}) {}
+ }
+
+ goArgs := []string{i.Verb}
+
+ appendModFile := func() {
+ if i.ModFile != "" {
+ goArgs = append(goArgs, "-modfile="+i.ModFile)
+ }
+ }
+ appendModFlag := func() {
+ if i.ModFlag != "" {
+ goArgs = append(goArgs, "-mod="+i.ModFlag)
+ }
+ }
+ appendOverlayFlag := func() {
+ if i.Overlay != "" {
+ goArgs = append(goArgs, "-overlay="+i.Overlay)
+ }
+ }
+
+ switch i.Verb {
+ case "env", "version":
+ goArgs = append(goArgs, i.Args...)
+ case "mod":
+ // mod needs the sub-verb before flags.
+ goArgs = append(goArgs, i.Args[0])
+ appendModFile()
+ goArgs = append(goArgs, i.Args[1:]...)
+ case "get":
+ goArgs = append(goArgs, i.BuildFlags...)
+ appendModFile()
+ goArgs = append(goArgs, i.Args...)
+
+ default: // notably list and build.
+ goArgs = append(goArgs, i.BuildFlags...)
+ appendModFile()
+ appendModFlag()
+ appendOverlayFlag()
+ goArgs = append(goArgs, i.Args...)
+ }
+ cmd := exec.Command("go", goArgs...)
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+
+ // cmd.WaitDelay was added only in go1.20 (see #50436).
+ if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
+ // https://go.dev/issue/59541: don't wait forever copying stderr
+ // after the command has exited.
+ // After CL 484741 we copy stdout manually, so we'll stop reading that as
+ // soon as ctx is done. However, we also don't want to wait around forever
+ // for stderr. Give a much-longer-than-reasonable delay and then assume that
+ // something has wedged in the kernel or runtime.
+ waitDelay.Set(reflect.ValueOf(30 * time.Second))
+ }
+
+ // On darwin the cwd gets resolved to the real path, which breaks anything that
+ // expects the working directory to keep the original path, including the
+ // go command when dealing with modules.
+ // The Go stdlib has a special feature where if the cwd and the PWD are the
+ // same node then it trusts the PWD, so by setting it in the env for the child
+ // process we fix up all the paths returned by the go command.
+ if !i.CleanEnv {
+ cmd.Env = os.Environ()
+ }
+ cmd.Env = append(cmd.Env, i.Env...)
+ if i.WorkingDir != "" {
+ cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
+ cmd.Dir = i.WorkingDir
+ }
+
+ defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+
+ return runCmdContext(ctx, cmd)
+}
+
+// DebugHangingGoCommands may be set by tests to enable additional
+// instrumentation (including panics) for debugging hanging Go commands.
+//
+// See golang/go#54461 for details.
+var DebugHangingGoCommands = false
+
+// runCmdContext is like exec.CommandContext except it sends os.Interrupt
+// before os.Kill.
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+ // If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+ // copy it to the Writer in a goroutine until the process has finished and
+ // either the pipe reaches EOF or command's WaitDelay expires.
+ //
+ // However, the output from 'go list' can be quite large, and we don't want to
+ // keep reading (and allocating buffers) if we've already decided we don't
+ // care about the output. We don't want to wait for the process to finish, and
+ // we don't want to wait for the WaitDelay to expire either.
+ //
+ // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+ // it with a pipe (which is an *os.File), which we can close in order to stop
+ // copying output as soon as we realize we don't care about it.
+ var stdoutW *os.File
+ if cmd.Stdout != nil {
+ if _, ok := cmd.Stdout.(*os.File); !ok {
+ var stdoutR *os.File
+ stdoutR, stdoutW, err = os.Pipe()
+ if err != nil {
+ return err
+ }
+ prevStdout := cmd.Stdout
+ cmd.Stdout = stdoutW
+
+ stdoutErr := make(chan error, 1)
+ go func() {
+ _, err := io.Copy(prevStdout, stdoutR)
+ if err != nil {
+ err = fmt.Errorf("copying stdout: %w", err)
+ }
+ stdoutErr <- err
+ }()
+ defer func() {
+ // We started a goroutine to copy a stdout pipe.
+ // Wait for it to finish, or terminate it if need be.
+ var err2 error
+ select {
+ case err2 = <-stdoutErr:
+ stdoutR.Close()
+ case <-ctx.Done():
+ stdoutR.Close()
+ // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
+ // should cause the Read call in io.Copy to unblock and return
+ // immediately, but we still need to receive from stdoutErr to confirm
+ // that it has happened.
+ <-stdoutErr
+ err2 = ctx.Err()
+ }
+ if err == nil {
+ err = err2
+ }
+ }()
+
+ // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
+ // same writer, and have a type that can be compared with ==, at most
+ // one goroutine at a time will call Write.”
+ //
+ // Since we're starting a goroutine that writes to cmd.Stdout, we must
+ // also update cmd.Stderr so that it still holds.
+ func() {
+ defer func() { recover() }()
+ if cmd.Stderr == prevStdout {
+ cmd.Stderr = cmd.Stdout
+ }
+ }()
+ }
+ }
+
+ err = cmd.Start()
+ if stdoutW != nil {
+ // The child process has inherited the pipe file,
+ // so close the copy held in this process.
+ stdoutW.Close()
+ stdoutW = nil
+ }
+ if err != nil {
+ return err
+ }
+
+ resChan := make(chan error, 1)
+ go func() {
+ resChan <- cmd.Wait()
+ }()
+
+ // If we're interested in debugging hanging Go commands, stop waiting after a
+ // minute and panic with interesting information.
+ debug := DebugHangingGoCommands
+ if debug {
+ timer := time.NewTimer(1 * time.Minute)
+ defer timer.Stop()
+ select {
+ case err := <-resChan:
+ return err
+ case <-timer.C:
+ HandleHangingGoCommand(cmd.Process)
+ case <-ctx.Done():
+ }
+ } else {
+ select {
+ case err := <-resChan:
+ return err
+ case <-ctx.Done():
+ }
+ }
+
+ // Cancelled. Interrupt and see if it ends voluntarily.
+ if err := cmd.Process.Signal(os.Interrupt); err == nil {
+ // (We used to wait only 1s but this proved
+ // fragile on loaded builder machines.)
+ timer := time.NewTimer(5 * time.Second)
+ defer timer.Stop()
+ select {
+ case err := <-resChan:
+ return err
+ case <-timer.C:
+ }
+ }
+
+ // Didn't shut down in response to interrupt. Kill it hard.
+ // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
+ // on certain platforms, such as unix.
+ if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
+ log.Printf("error killing the Go command: %v", err)
+ }
+
+ return <-resChan
+}
+
+func HandleHangingGoCommand(proc *os.Process) {
+ switch runtime.GOOS {
+ case "linux", "darwin", "freebsd", "netbsd":
+ fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
+
+The gopls test runner has detected a hanging go command. In order to debug
+this, the output of ps and lsof/fstat is printed below.
+
+See golang/go#54461 for more details.`)
+
+ fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
+ fmt.Fprintln(os.Stderr, "-------------------------")
+ psCmd := exec.Command("ps", "axo", "ppid,pid,command")
+ psCmd.Stdout = os.Stderr
+ psCmd.Stderr = os.Stderr
+ if err := psCmd.Run(); err != nil {
+ panic(fmt.Sprintf("running ps: %v", err))
+ }
+
+ listFiles := "lsof"
+ if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
+ listFiles = "fstat"
+ }
+
+ fmt.Fprintln(os.Stderr, "\n"+listFiles+":")
+ fmt.Fprintln(os.Stderr, "-----")
+ listFilesCmd := exec.Command(listFiles)
+ listFilesCmd.Stdout = os.Stderr
+ listFilesCmd.Stderr = os.Stderr
+ if err := listFilesCmd.Run(); err != nil {
+ panic(fmt.Sprintf("running %s: %v", listFiles, err))
+ }
+ }
+ panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid))
+}
+
+func cmdDebugStr(cmd *exec.Cmd) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.SplitN(kv, "=", 2)
+ if len(split) == 2 {
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+ }
+
+ var args []string
+ for _, arg := range cmd.Args {
+ quoted := strconv.Quote(arg)
+ if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
+ args = append(args, quoted)
+ } else {
+ args = append(args, arg)
+ }
+ }
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go
new file mode 100644
index 0000000000..2d3d408c0b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go
@@ -0,0 +1,109 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocommand
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "golang.org/x/mod/semver"
+)
+
+// ModuleJSON holds information about a module.
+type ModuleJSON struct {
+ Path string // module path
+ Version string // module version
+ Versions []string // available module versions (with -versions)
+ Replace *ModuleJSON // replaced by this module
+ Time *time.Time // time version was created
+ Update *ModuleJSON // available update, if any (with -u)
+ Main bool // is this the main module?
+ Indirect bool // is this module only an indirect dependency of main module?
+ Dir string // directory holding files for this module, if any
+ GoMod string // path to go.mod file used when loading this module, if any
+ GoVersion string // go version used in module
+}
+
+var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
+
+// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands
+// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
+// of which only Verb and Args are modified to run the appropriate Go command.
+// Inspired by setDefaultBuildMod in modload/init.go
+func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) {
+ mainMod, go114, err := getMainModuleAnd114(ctx, inv, r)
+ if err != nil {
+ return false, nil, err
+ }
+
+ // We check the GOFLAGS to see if there is anything overridden or not.
+ inv.Verb = "env"
+ inv.Args = []string{"GOFLAGS"}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return false, nil, err
+ }
+ goflags := string(bytes.TrimSpace(stdout.Bytes()))
+ matches := modFlagRegexp.FindStringSubmatch(goflags)
+ var modFlag string
+ if len(matches) != 0 {
+ modFlag = matches[1]
+ }
+ // Don't override an explicit '-mod=' argument.
+ if modFlag == "vendor" {
+ return true, mainMod, nil
+ } else if modFlag != "" {
+ return false, nil, nil
+ }
+ if mainMod == nil || !go114 {
+ return false, nil, nil
+ }
+ // Check 1.14's automatic vendor mode.
+ if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
+ if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
+ // The Go version is at least 1.14, and a vendor directory exists.
+ // Set -mod=vendor by default.
+ return true, mainMod, nil
+ }
+ }
+ return false, nil, nil
+}
+
+// getMainModuleAnd114 gets one of the main modules' information and whether the
+// go command in use is 1.14+. This is the information needed to figure out
+// if vendoring should be enabled.
+func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
+ const format = `{{.Path}}
+{{.Dir}}
+{{.GoMod}}
+{{.GoVersion}}
+{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
+`
+ inv.Verb = "list"
+ inv.Args = []string{"-m", "-f", format}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return nil, false, err
+ }
+
+ lines := strings.Split(stdout.String(), "\n")
+ if len(lines) < 5 {
+ return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String())
+ }
+ mod := &ModuleJSON{
+ Path: lines[0],
+ Dir: lines[1],
+ GoMod: lines[2],
+ GoVersion: lines[3],
+ Main: true,
+ }
+ return mod, lines[4] == "go1.14", nil
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go
new file mode 100644
index 0000000000..446c5846a6
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/version.go
@@ -0,0 +1,71 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocommand
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// GoVersion reports the minor version number of the highest release
+// tag built into the go command on the PATH.
+//
+// Note that this may be higher than the version of the go tool used
+// to build this application, and thus the versions of the standard
+// go/{scanner,parser,ast,types} packages that are linked into it.
+// In that case, callers should either downgrade to the version of
+// go used to build the application, or report an error that the
+// application is too old to use the go command on the PATH.
+func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
+ inv.Verb = "list"
+ inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
+ inv.BuildFlags = nil // This is not a build command.
+ inv.ModFlag = ""
+ inv.ModFile = ""
+ inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off")
+
+ stdoutBytes, err := r.Run(ctx, inv)
+ if err != nil {
+ return 0, err
+ }
+ stdout := stdoutBytes.String()
+ if len(stdout) < 3 {
+ return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
+ }
+ // Split up "[go1.1 go1.15]" and return highest go1.X value.
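+ // For example (illustrative), output "[go1.1 go1.15]\n" yields 15.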
+ tags := strings.Fields(stdout[1 : len(stdout)-2])
+ for i := len(tags) - 1; i >= 0; i-- {
+ var version int
+ if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil {
+ continue
+ }
+ return version, nil
+ }
+ return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
+}
+
+// GoVersionOutput returns the complete output of the go version command.
+func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
+ inv.Verb = "version"
+ goVersion, err := r.Run(ctx, inv)
+ if err != nil {
+ return "", err
+ }
+ return goVersion.String(), nil
+}
+
+// ParseGoVersionOutput extracts the Go version string
+// from the output of the "go version" command.
+// Given an unrecognized form, it returns an empty string.
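+// For example, ParseGoVersionOutput("go version go1.21.3 linux/amd64") returns "go1.21.3".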
+func ParseGoVersionOutput(data string) string {
+ re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
+ m := re.FindStringSubmatch(data)
+ if len(m) != 2 {
+ return "" // unrecognized version
+ }
+ return m[1]
+}
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
new file mode 100644
index 0000000000..44719de173
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package packagesinternal exposes internal-only fields from go/packages.
+package packagesinternal
+
+var GetForTest = func(p interface{}) string { return "" }
+var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
+
+type PackageError struct {
+ ImportStack []string // shortest path from package named on command line to this one
+ Pos string // position of error (if present, file:line:col)
+ Err string // the error itself
+}
+
+var TypecheckCgo int
+var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
+var ForTest int // must be set as a LoadMode to call GetForTest
+
+var SetModFlag = func(config interface{}, value string) {}
+var SetModFile = func(config interface{}, value string) {}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/internal/pkgbits/codes.go
new file mode 100644
index 0000000000..f0cabde96e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/codes.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A Code is an enum value that can be encoded into bitstreams.
+//
+// Code types are preferable for enum types, because they allow
+// Decoder to detect desyncs.
+type Code interface {
+ // Marker returns the SyncMarker for the Code's dynamic type.
+ Marker() SyncMarker
+
+ // Value returns the Code's ordinal value.
+ Value() int
+}
+
+// A CodeVal distinguishes among go/constant.Value encodings.
+type CodeVal int
+
+func (c CodeVal) Marker() SyncMarker { return SyncVal }
+func (c CodeVal) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ValBool CodeVal = iota
+ ValString
+ ValInt64
+ ValBigInt
+ ValBigRat
+ ValBigFloat
+)
+
+// A CodeType distinguishes among go/types.Type encodings.
+type CodeType int
+
+func (c CodeType) Marker() SyncMarker { return SyncType }
+func (c CodeType) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ TypeBasic CodeType = iota
+ TypeNamed
+ TypePointer
+ TypeSlice
+ TypeArray
+ TypeChan
+ TypeMap
+ TypeSignature
+ TypeStruct
+ TypeInterface
+ TypeUnion
+ TypeTypeParam
+)
+
+// A CodeObj distinguishes among go/types.Object encodings.
+type CodeObj int
+
+func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
+func (c CodeObj) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ObjAlias CodeObj = iota
+ ObjConst
+ ObjType
+ ObjFunc
+ ObjVar
+ ObjStub
+)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
new file mode 100644
index 0000000000..b92e8e6eb3
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -0,0 +1,517 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "io"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// A PkgDecoder provides methods for decoding a package's Unified IR
+// export data.
+type PkgDecoder struct {
+ // version is the file format version.
+ version uint32
+
+ // sync indicates whether the file uses sync markers.
+ sync bool
+
+ // pkgPath is the package path for the package to be decoded.
+ //
+ // TODO(mdempsky): Remove; unneeded since CL 391014.
+ pkgPath string
+
+ // elemData is the full data payload of the encoded package.
+ // Elements are densely and contiguously packed together.
+ //
+ // The last 8 bytes of elemData are the package fingerprint.
+ elemData string
+
+ // elemEnds stores the byte-offset end positions of element
+ // bitstreams within elemData.
+ //
+ // For example, element I's bitstream data starts at elemEnds[I-1]
+ // (or 0, if I==0) and ends at elemEnds[I].
+ //
+ // Note: elemEnds is indexed by absolute indices, not
+ // section-relative indices.
+ elemEnds []uint32
+
+ // elemEndsEnds stores the index-offset end positions of relocation
+ // sections within elemEnds.
+ //
+ // For example, section K's end positions start at elemEndsEnds[K-1]
+ // (or 0, if K==0) and end at elemEndsEnds[K].
+ elemEndsEnds [numRelocs]uint32
+
+ scratchRelocEnt []RelocEnt
+}
+
+// PkgPath returns the package path for the package
+//
+// TODO(mdempsky): Remove; unneeded since CL 391014.
+func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
+
+// SyncMarkers reports whether pr uses sync markers.
+func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
+
+// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
+// IR export data from input. pkgPath is the package path for the
+// compilation unit that produced the export data.
+//
+// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
+func NewPkgDecoder(pkgPath, input string) PkgDecoder {
+ pr := PkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+
+ switch pr.version {
+ default:
+ panic(fmt.Errorf("unsupported version: %v", pr.version))
+ case 0:
+ // no flags
+ case 1:
+ var flags uint32
+ assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
+ pr.sync = flags&flagSyncMarkers != 0
+ }
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, io.SeekCurrent)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+ assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+// NumElems returns the number of elements in section k.
+func (pr *PkgDecoder) NumElems(k RelocKind) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+// TotalElems returns the total number of elements across all sections.
+func (pr *PkgDecoder) TotalElems() int {
+ return len(pr.elemEnds)
+}
+
+// Fingerprint returns the package fingerprint.
+func (pr *PkgDecoder) Fingerprint() [8]byte {
+ var fp [8]byte
+ copy(fp[:], pr.elemData[len(pr.elemData)-8:])
+ return fp
+}
+
+// AbsIdx returns the absolute index for the given (section, index)
+// pair.
+func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
+ absIdx := int(idx)
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+// DataIdx returns the raw element bitstream for the given (section,
+// index) pair.
+func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string {
+ absIdx := pr.AbsIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+// StringIdx returns the string value for the given string index.
+func (pr *PkgDecoder) StringIdx(idx Index) string {
+ return pr.DataIdx(RelocString, idx)
+}
+
+// NewDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.NewDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+// TempDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+// If possible the Decoder should be RetireDecoder'd when it is no longer
+// needed; this avoids heap allocations.
+func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.TempDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
+ pr.scratchRelocEnt = d.Relocs
+ d.Relocs = nil
+}
+
+// NewDecoderRaw returns a Decoder for the given (section, index) pair.
+//
+// Most callers should use NewDecoder instead.
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+ r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
+ r.Sync(SyncRelocs)
+ r.Relocs = make([]RelocEnt, r.Len())
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ r.Data.Reset(pr.DataIdx(k, idx))
+ r.Sync(SyncRelocs)
+ l := r.Len()
+ if cap(pr.scratchRelocEnt) >= l {
+ r.Relocs = pr.scratchRelocEnt[:l]
+ pr.scratchRelocEnt = nil
+ } else {
+ r.Relocs = make([]RelocEnt, l)
+ }
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+ common *PkgDecoder
+
+ Relocs []RelocEnt
+ Data strings.Reader
+
+ k RelocKind
+ Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected decoding error: %w", err)
+ }
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+ x, err := readUvarint(&r.Data)
+ r.checkErr(err)
+ return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+ var x uint64
+ var s uint
+ for i := 0; i < binary.MaxVarintLen64; i++ {
+ b, err := r.ReadByte()
+ if err != nil {
+ if i > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return x, err
+ }
+ if b < 0x80 {
+ if i == binary.MaxVarintLen64-1 && b > 1 {
+ return x, overflow
+ }
+ return x | uint64(b)<<s, nil
+ }
+ x |= uint64(b&0x7f) << s
+ s += 7
+ }
+ return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+ ux := r.rawUvarint()
+
+ // Zig-zag decode.
+ x := int64(ux >> 1)
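+ // For illustration: zig-zag encoding maps 0->0, -1->1, 1->2, -2->3, so the low bit of ux selects the sign.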
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+ e := r.Relocs[idx]
+ assert(e.Kind == k)
+ return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+ if !r.common.sync {
+ return
+ }
+
+ pos, _ := r.Data.Seek(0, io.SeekCurrent)
+ mHave := SyncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+// Bool decodes and returns a bool value from the element bitstream.
+func (r *Decoder) Bool() bool {
+ r.Sync(SyncBool)
+ x, err := r.Data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+// Int64 decodes and returns an int64 value from the element bitstream.
+func (r *Decoder) Int64() int64 {
+ r.Sync(SyncInt64)
+ return r.rawVarint()
+}
+
+// Uint64 decodes and returns a uint64 value from the element bitstream.
+func (r *Decoder) Uint64() uint64 {
+ r.Sync(SyncUint64)
+ return r.rawUvarint()
+}
+
+// Len decodes and returns a non-negative int value from the element bitstream.
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+
+// Int decodes and returns an int value from the element bitstream.
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+
+// Uint decodes and returns a uint value from the element bitstream.
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+// Code decodes a Code value from the element bitstream and returns
+// its ordinal value. It's the caller's responsibility to convert the
+// result to an appropriate Code type.
+//
+// TODO(mdempsky): Ideally this method would have signature "Code[T
+// Code] T" instead, but we don't allow generic methods and the
+// compiler can't depend on generics yet anyway.
+func (r *Decoder) Code(mark SyncMarker) int {
+ r.Sync(mark)
+ return r.Len()
+}
+
+// Reloc decodes a relocation of expected section k from the element
+// bitstream and returns an index to the referenced element.
+func (r *Decoder) Reloc(k RelocKind) Index {
+ r.Sync(SyncUseReloc)
+ return r.rawReloc(k, r.Len())
+}
+
+// String decodes and returns a string value from the element
+// bitstream.
+func (r *Decoder) String() string {
+ r.Sync(SyncString)
+ return r.common.StringIdx(r.Reloc(RelocString))
+}
+
+// Strings decodes and returns a variable-length slice of strings from
+// the element bitstream.
+func (r *Decoder) Strings() []string {
+ res := make([]string, r.Len())
+ for i := range res {
+ res[i] = r.String()
+ }
+ return res
+}
+
+// Value decodes and returns a constant.Value from the element
+// bitstream.
+func (r *Decoder) Value() constant.Value {
+ r.Sync(SyncValue)
+ isComplex := r.Bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *Decoder) scalar() constant.Value {
+ switch tag := CodeVal(r.Code(SyncVal)); tag {
+ default:
+ panic(fmt.Errorf("unexpected scalar tag: %v", tag))
+
+ case ValBool:
+ return constant.MakeBool(r.Bool())
+ case ValString:
+ return constant.MakeString(r.String())
+ case ValInt64:
+ return constant.MakeInt64(r.Int64())
+ case ValBigInt:
+ return constant.Make(r.bigInt())
+ case ValBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case ValBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *Decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.String()))
+ if r.Bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *Decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.String())) == nil)
+ return v
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+// PeekPkgPath returns the package path for the specified package
+// index.
+func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
+ var path string
+ {
+ r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
+ path = r.String()
+ pr.RetireDecoder(&r)
+ }
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+// PeekObj returns the package path, object name, and CodeObj for the
+// specified object index.
+func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
+ var ridx Index
+ var name string
+ var rcode int
+ {
+ r := pr.TempDecoder(RelocName, idx, SyncObject1)
+ r.Sync(SyncSym)
+ r.Sync(SyncPkg)
+ ridx = r.Reloc(RelocPkg)
+ name = r.String()
+ rcode = r.Code(SyncCodeObj)
+ pr.RetireDecoder(&r)
+ }
+
+ path := pr.PeekPkgPath(ridx)
+ assert(name != "")
+
+ tag := CodeObj(rcode)
+
+ return path, name, tag
+}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/internal/pkgbits/doc.go
new file mode 100644
index 0000000000..c8a2796b5e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/doc.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkgbits implements low-level coding abstractions for
+// Unified IR's export data format.
+//
+// At a low-level, a package is a collection of bitstream elements.
+// Each element has a "kind" and a dense, non-negative index.
+// Elements can be randomly accessed given their kind and index.
+//
+// Individual elements are sequences of variable-length values (e.g.,
+// integers, booleans, strings, go/constant values, cross-references
+// to other elements). Package pkgbits provides APIs for encoding and
+// decoding these low-level values, but the details of mapping
+// higher-level Go constructs into elements is left to higher-level
+// abstractions.
+//
+// Elements may cross-reference each other with "relocations." For
+// example, an element representing a pointer type has a relocation
+// referring to the element type.
+//
+// Go constructs may be composed as a constellation of multiple
+// elements. For example, a declared function may have one element to
+// describe the object (e.g., its name, type, position), and a
+// separate element to describe its function body. This allows readers
+// some flexibility in efficiently seeking or re-reading data (e.g.,
+// inlining requires re-reading the function body for each inlined
+// call, without needing to re-read the object-level details).
+//
+// This is a copy of internal/pkgbits in the Go implementation.
+package pkgbits
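To make the element-and-relocation model described above concrete, here is a minimal, hedged sketch that encodes a single element using the Encoder API added in encoder.go below. It is illustrative only: pkgbits is an internal package of golang.org/x/tools, so the import path is an assumption and the snippet cannot be built outside that module; the calls themselves (NewPkgEncoder, NewEncoder, String, Bool, Flush, DumpTo) match the declarations in this patch.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/tools/internal/pkgbits" // internal; import path shown for illustration only
)

func main() {
	// A negative frame count means no sync markers are written.
	pw := pkgbits.NewPkgEncoder(-1)

	// The first element written to the meta section gets index 0,
	// which is the reserved PublicRootIdx.
	w := pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic)
	w.String("hello") // stored once in the string section, referenced via a relocation
	w.Bool(true)      // written inline into this element's bitstream
	idx := w.Flush()

	var buf bytes.Buffer
	fingerprint := pw.DumpTo(&buf)
	fmt.Println(idx, buf.Len(), fingerprint)
}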
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
new file mode 100644
index 0000000000..6482617a4f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -0,0 +1,383 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+)
+
+// currentVersion is the current version number.
+//
+// - v0: initial prototype
+//
+// - v1: adds the flags uint32 word
+const currentVersion uint32 = 1
+
+// A PkgEncoder provides methods for encoding a package's Unified IR
+// export data.
+type PkgEncoder struct {
+ // elems holds the bitstream for previously encoded elements.
+ elems [numRelocs][]string
+
+ // stringsIdx maps previously encoded strings to their index within
+ // the RelocString section, to allow deduplication. That is,
+ // elems[RelocString][stringsIdx[s]] == s (if present).
+ stringsIdx map[string]Index
+
+ // syncFrames is the number of frames to write at each sync
+ // marker. A negative value means sync markers are omitted.
+ syncFrames int
+}
+
+// SyncMarkers reports whether pw uses sync markers.
+func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
+
+// NewPkgEncoder returns an initialized PkgEncoder.
+//
+// syncFrames is the number of caller frames that should be serialized
+// at Sync points. Serializing additional frames results in larger
+// export data files, but can help diagnosing desync errors in
+// higher-level Unified IR reader/writer code. If syncFrames is
+// negative, then sync markers are omitted entirely.
+func NewPkgEncoder(syncFrames int) PkgEncoder {
+ return PkgEncoder{
+ stringsIdx: make(map[string]Index),
+ syncFrames: syncFrames,
+ }
+}
+
+// DumpTo writes the package's encoded data to out0 and returns the
+// package fingerprint.
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
+ h := md5.New()
+ out := io.MultiWriter(out0, h)
+
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ writeUint32(currentVersion)
+
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
+
+ // Write elemEndsEnds.
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ // Write elemEnds.
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ // Write elemData.
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+
+ // Write fingerprint.
+ copy(fingerprint[:], h.Sum(nil))
+ _, err := out0.Write(fingerprint[:])
+ assert(err == nil)
+
+ return
+}
+
+// StringIdx adds a string value to the strings section, if not
+// already present, and returns its index.
+func (pw *PkgEncoder) StringIdx(s string) Index {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[RelocString][idx] == s)
+ return idx
+ }
+
+ idx := Index(len(pw.elems[RelocString]))
+ pw.elems[RelocString] = append(pw.elems[RelocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+// NewEncoder returns an Encoder for a new element within the given
+// section, and encodes the given SyncMarker as the start of the
+// element bitstream.
+func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
+ e := pw.NewEncoderRaw(k)
+ e.Sync(marker)
+ return e
+}
+
+// NewEncoderRaw returns an Encoder for a new element within the given
+// section.
+//
+// Most callers should use NewEncoder instead.
+func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
+ idx := Index(len(pw.elems[k]))
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return Encoder{
+ p: pw,
+ k: k,
+ Idx: idx,
+ }
+}
+
+// An Encoder provides methods for encoding an individual element's
+// bitstream data.
+type Encoder struct {
+ p *PkgEncoder
+
+ Relocs []RelocEnt
+ RelocMap map[RelocEnt]uint32
+ Data bytes.Buffer // accumulated element bitstream data
+
+ encodingRelocHeader bool
+
+ k RelocKind
+ Idx Index // index within relocation section
+}
+
+// Flush finalizes the element's bitstream and returns its Index.
+func (w *Encoder) Flush() Index {
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+
+ // Backup the data so we write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.Data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ panic("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.Sync(SyncRelocs)
+ w.Len(len(w.Relocs))
+ for _, rEnt := range w.Relocs {
+ w.Sync(SyncReloc)
+ w.Len(int(rEnt.Kind))
+ w.Len(int(rEnt.Idx))
+ }
+
+ io.Copy(&sb, &w.Data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.Idx] = sb.String()
+
+ return w.Idx
+}
+
+func (w *Encoder) checkErr(err error) {
+ if err != nil {
+ errorf("unexpected encoding error: %v", err)
+ }
+}
+
+func (w *Encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.Data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *Encoder) rawVarint(x int64) {
+ // Zig-zag encode.
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
+ e := RelocEnt{r, idx}
+ if w.RelocMap != nil {
+ if i, ok := w.RelocMap[e]; ok {
+ return int(i)
+ }
+ } else {
+ w.RelocMap = make(map[RelocEnt]uint32)
+ }
+
+ i := len(w.Relocs)
+ w.RelocMap[e] = uint32(i)
+ w.Relocs = append(w.Relocs, e)
+ return i
+}
+
+func (w *Encoder) Sync(m SyncMarker) {
+ if !w.p.SyncMarkers() {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && w.p.syncFrames > 0 {
+ pcs := make([]uintptr, w.p.syncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
+ }
+}
+
+// Bool encodes and writes a bool value into the element bitstream,
+// and then returns the bool value.
+//
+// For simple, 2-alternative encodings, the idiomatic way to call Bool
+// is something like:
+//
+// if w.Bool(x != 0) {
+// // alternative #1
+// } else {
+// // alternative #2
+// }
+//
+// For multi-alternative encodings, use Code instead.
+func (w *Encoder) Bool(b bool) bool {
+ w.Sync(SyncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.Data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+// Int64 encodes and writes an int64 value into the element bitstream.
+func (w *Encoder) Int64(x int64) {
+ w.Sync(SyncInt64)
+ w.rawVarint(x)
+}
+
+// Uint64 encodes and writes a uint64 value into the element bitstream.
+func (w *Encoder) Uint64(x uint64) {
+ w.Sync(SyncUint64)
+ w.rawUvarint(x)
+}
+
+// Len encodes and writes a non-negative int value into the element bitstream.
+func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
+
+// Int encodes and writes an int value into the element bitstream.
+func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
+
+// Uint encodes and writes a uint value into the element bitstream.
+func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
+
+// Reloc encodes and writes a relocation for the given (section,
+// index) pair into the element bitstream.
+//
+// Note: Only the index is formally written into the element
+// bitstream, so bitstream decoders must know from context which
+// section an encoded relocation refers to.
+func (w *Encoder) Reloc(r RelocKind, idx Index) {
+ w.Sync(SyncUseReloc)
+ w.Len(w.rawReloc(r, idx))
+}
+
+// Code encodes and writes a Code value into the element bitstream.
+func (w *Encoder) Code(c Code) {
+ w.Sync(c.Marker())
+ w.Len(c.Value())
+}
+
+// String encodes and writes a string value into the element
+// bitstream.
+//
+// Internally, strings are deduplicated by adding them to the strings
+// section (if not already present), and then writing a relocation
+// into the element bitstream.
+func (w *Encoder) String(s string) {
+ w.Sync(SyncString)
+ w.Reloc(RelocString, w.p.StringIdx(s))
+}
+
+// Strings encodes and writes a variable-length slice of strings into
+// the element bitstream.
+func (w *Encoder) Strings(ss []string) {
+ w.Len(len(ss))
+ for _, s := range ss {
+ w.String(s)
+ }
+}
+
+// Value encodes and writes a constant.Value into the element
+// bitstream.
+func (w *Encoder) Value(val constant.Value) {
+ w.Sync(SyncValue)
+ if w.Bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *Encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ errorf("unhandled %v (%v)", val, val.Kind())
+ case bool:
+ w.Code(ValBool)
+ w.Bool(v)
+ case string:
+ w.Code(ValString)
+ w.String(v)
+ case int64:
+ w.Code(ValInt64)
+ w.Int64(v)
+ case *big.Int:
+ w.Code(ValBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.Code(ValBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.Code(ValBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *Encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.String(string(b)) // TODO: More efficient encoding.
+ w.Bool(v.Sign() < 0)
+}
+
+func (w *Encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.String(string(b)) // TODO: More efficient encoding.
+}
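Encoder.rawVarint and Decoder.rawVarint (in decoder.go above) rely on zig-zag coding so that small negative values also get short varint encodings. The following standalone sketch round-trips a few values through the same transform; it is independent of pkgbits and only mirrors the two formulas used there.

package main

import "fmt"

// zigzag mirrors Encoder.rawVarint: the sign is interleaved into bit 0.
func zigzag(x int64) uint64 {
	ux := uint64(x) << 1
	if x < 0 {
		ux = ^ux
	}
	return ux
}

// unzigzag mirrors Decoder.rawVarint: shift the magnitude back and restore the sign.
func unzigzag(ux uint64) int64 {
	x := int64(ux >> 1)
	if ux&1 != 0 {
		x = ^x
	}
	return x
}

func main() {
	for _, x := range []int64{0, -1, 1, -2, 63, -64, 1 << 40} {
		ux := zigzag(x)
		fmt.Printf("%d -> %d -> %d\n", x, ux, unzigzag(ux))
	}
}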
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/internal/pkgbits/flags.go
new file mode 100644
index 0000000000..654222745f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+const (
+ flagSyncMarkers = 1 << iota // file format contains sync markers
+)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
new file mode 100644
index 0000000000..5294f6a63e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.7
+// +build !go1.7
+
+// TODO(mdempsky): Remove after #44505 is resolved
+
+package pkgbits
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ for _, pc := range pcs {
+ fn := runtime.FuncForPC(pc)
+ file, line := fn.FileLine(pc)
+
+ visit(file, line, fn.Name(), pc-fn.Entry())
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
new file mode 100644
index 0000000000..2324ae7adf
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+// +build go1.7
+
+package pkgbits
+
+import "runtime"
+
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go
new file mode 100644
index 0000000000..fcdfb97ca9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A RelocKind indicates a particular section within a unified IR export.
+type RelocKind int32
+
+// An Index represents a bitstream element index within a particular
+// section.
+type Index int32
+
+// A relocEnt (relocation entry) is an entry in an element's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type RelocEnt struct {
+ Kind RelocKind
+ Idx Index
+}
+
+// Reserved indices within the meta relocation section.
+const (
+ PublicRootIdx Index = 0
+ PrivateRootIdx Index = 1
+)
+
+const (
+ RelocString RelocKind = iota
+ RelocMeta
+ RelocPosBase
+ RelocPkg
+ RelocName
+ RelocType
+ RelocObj
+ RelocObjExt
+ RelocObjDict
+ RelocBody
+
+ numRelocs = iota
+)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
new file mode 100644
index 0000000000..ad26d3b28c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import "fmt"
+
+func assert(b bool) {
+ if !b {
+ panic("assertion failed")
+ }
+}
+
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Errorf(format, args...))
+}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
new file mode 100644
index 0000000000..5bd51ef717
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -0,0 +1,113 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "fmt"
+ "strings"
+)
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// SyncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type SyncMarker int
+
+//go:generate stringer -type=SyncMarker -trimprefix=Sync
+
+const (
+ _ SyncMarker = iota
+
+ // Public markers (known to go/types importers).
+
+ // Low-level coding markers.
+ SyncEOF
+ SyncBool
+ SyncInt64
+ SyncUint64
+ SyncString
+ SyncValue
+ SyncVal
+ SyncRelocs
+ SyncReloc
+ SyncUseReloc
+
+ // Higher-level object and type markers.
+ SyncPublic
+ SyncPos
+ SyncPosBase
+ SyncObject
+ SyncObject1
+ SyncPkg
+ SyncPkgDef
+ SyncMethod
+ SyncType
+ SyncTypeIdx
+ SyncTypeParamNames
+ SyncSignature
+ SyncParams
+ SyncParam
+ SyncCodeObj
+ SyncSym
+ SyncLocalIdent
+ SyncSelector
+
+ // Private markers (only known to cmd/compile).
+ SyncPrivate
+
+ SyncFuncExt
+ SyncVarExt
+ SyncTypeExt
+ SyncPragma
+
+ SyncExprList
+ SyncExprs
+ SyncExpr
+ SyncExprType
+ SyncAssign
+ SyncOp
+ SyncFuncLit
+ SyncCompLit
+
+ SyncDecl
+ SyncFuncBody
+ SyncOpenScope
+ SyncCloseScope
+ SyncCloseAnotherScope
+ SyncDeclNames
+ SyncDeclName
+
+ SyncStmts
+ SyncBlockStmt
+ SyncIfStmt
+ SyncForStmt
+ SyncSwitchStmt
+ SyncRangeStmt
+ SyncCaseClause
+ SyncCommClause
+ SyncSelectStmt
+ SyncDecls
+ SyncLabeledStmt
+ SyncUseObjLocal
+ SyncAddLocal
+ SyncLinkname
+ SyncStmt1
+ SyncStmtsEnd
+ SyncLabel
+ SyncOptLabel
+)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
new file mode 100644
index 0000000000..4a5b0ca5f2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -0,0 +1,89 @@
+// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
+
+package pkgbits
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[SyncEOF-1]
+ _ = x[SyncBool-2]
+ _ = x[SyncInt64-3]
+ _ = x[SyncUint64-4]
+ _ = x[SyncString-5]
+ _ = x[SyncValue-6]
+ _ = x[SyncVal-7]
+ _ = x[SyncRelocs-8]
+ _ = x[SyncReloc-9]
+ _ = x[SyncUseReloc-10]
+ _ = x[SyncPublic-11]
+ _ = x[SyncPos-12]
+ _ = x[SyncPosBase-13]
+ _ = x[SyncObject-14]
+ _ = x[SyncObject1-15]
+ _ = x[SyncPkg-16]
+ _ = x[SyncPkgDef-17]
+ _ = x[SyncMethod-18]
+ _ = x[SyncType-19]
+ _ = x[SyncTypeIdx-20]
+ _ = x[SyncTypeParamNames-21]
+ _ = x[SyncSignature-22]
+ _ = x[SyncParams-23]
+ _ = x[SyncParam-24]
+ _ = x[SyncCodeObj-25]
+ _ = x[SyncSym-26]
+ _ = x[SyncLocalIdent-27]
+ _ = x[SyncSelector-28]
+ _ = x[SyncPrivate-29]
+ _ = x[SyncFuncExt-30]
+ _ = x[SyncVarExt-31]
+ _ = x[SyncTypeExt-32]
+ _ = x[SyncPragma-33]
+ _ = x[SyncExprList-34]
+ _ = x[SyncExprs-35]
+ _ = x[SyncExpr-36]
+ _ = x[SyncExprType-37]
+ _ = x[SyncAssign-38]
+ _ = x[SyncOp-39]
+ _ = x[SyncFuncLit-40]
+ _ = x[SyncCompLit-41]
+ _ = x[SyncDecl-42]
+ _ = x[SyncFuncBody-43]
+ _ = x[SyncOpenScope-44]
+ _ = x[SyncCloseScope-45]
+ _ = x[SyncCloseAnotherScope-46]
+ _ = x[SyncDeclNames-47]
+ _ = x[SyncDeclName-48]
+ _ = x[SyncStmts-49]
+ _ = x[SyncBlockStmt-50]
+ _ = x[SyncIfStmt-51]
+ _ = x[SyncForStmt-52]
+ _ = x[SyncSwitchStmt-53]
+ _ = x[SyncRangeStmt-54]
+ _ = x[SyncCaseClause-55]
+ _ = x[SyncCommClause-56]
+ _ = x[SyncSelectStmt-57]
+ _ = x[SyncDecls-58]
+ _ = x[SyncLabeledStmt-59]
+ _ = x[SyncUseObjLocal-60]
+ _ = x[SyncAddLocal-61]
+ _ = x[SyncLinkname-62]
+ _ = x[SyncStmt1-63]
+ _ = x[SyncStmtsEnd-64]
+ _ = x[SyncLabel-65]
+ _ = x[SyncOptLabel-66]
+}
+
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
+
+func (i SyncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
+ return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
+}
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
new file mode 100644
index 0000000000..7e638ec24f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
@@ -0,0 +1,151 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tokeninternal provides access to some internal features of the token
+// package.
+package tokeninternal
+
+import (
+ "fmt"
+ "go/token"
+ "sort"
+ "sync"
+ "unsafe"
+)
+
+// GetLines returns the table of line-start offsets from a token.File.
+func GetLines(file *token.File) []int {
+ // token.File has a Lines method on Go 1.21 and later.
+ if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
+ return file.Lines()
+ }
+
+ // This declaration must match that of token.File.
+ // This creates a risk of dependency skew.
+ // For now we check that the size of the two
+ // declarations is the same, on the (fragile) assumption
+ // that future changes would add fields.
+ type tokenFile119 struct {
+ _ string
+ _ int
+ _ int
+ mu sync.Mutex // we're not complete monsters
+ lines []int
+ _ []struct{}
+ }
+ type tokenFile118 struct {
+ _ *token.FileSet // deleted in go1.19
+ tokenFile119
+ }
+
+ type uP = unsafe.Pointer
+ switch unsafe.Sizeof(*file) {
+ case unsafe.Sizeof(tokenFile118{}):
+ var ptr *tokenFile118
+ *(*uP)(uP(&ptr)) = uP(file)
+ ptr.mu.Lock()
+ defer ptr.mu.Unlock()
+ return ptr.lines
+
+ case unsafe.Sizeof(tokenFile119{}):
+ var ptr *tokenFile119
+ *(*uP)(uP(&ptr)) = uP(file)
+ ptr.mu.Lock()
+ defer ptr.mu.Unlock()
+ return ptr.lines
+
+ default:
+ panic("unexpected token.File size")
+ }
+}
+
+// AddExistingFiles adds the specified files to the FileSet if they
+// are not already present. It panics if any pair of files in the
+// resulting FileSet would overlap.
+func AddExistingFiles(fset *token.FileSet, files []*token.File) {
+ // Punch through the FileSet encapsulation.
+ type tokenFileSet struct {
+ // This type remained essentially consistent from go1.16 to go1.21.
+ mutex sync.RWMutex
+ base int
+ files []*token.File
+ _ *token.File // changed to atomic.Pointer[token.File] in go1.19
+ }
+
+ // If the size of token.FileSet changes, this will fail to compile.
+ const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
+ var _ [-delta * delta]int
+
+ type uP = unsafe.Pointer
+ var ptr *tokenFileSet
+ *(*uP)(uP(&ptr)) = uP(fset)
+ ptr.mutex.Lock()
+ defer ptr.mutex.Unlock()
+
+ // Merge and sort.
+ newFiles := append(ptr.files, files...)
+ sort.Slice(newFiles, func(i, j int) bool {
+ return newFiles[i].Base() < newFiles[j].Base()
+ })
+
+ // Reject overlapping files.
+ // Discard adjacent identical files.
+ out := newFiles[:0]
+ for i, file := range newFiles {
+ if i > 0 {
+ prev := newFiles[i-1]
+ if file == prev {
+ continue
+ }
+ if prev.Base()+prev.Size()+1 > file.Base() {
+ panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
+ prev.Name(), prev.Base(), prev.Base()+prev.Size(),
+ file.Name(), file.Base(), file.Base()+file.Size()))
+ }
+ }
+ out = append(out, file)
+ }
+ newFiles = out
+
+ ptr.files = newFiles
+
+ // Advance FileSet.Base().
+ if len(newFiles) > 0 {
+ last := newFiles[len(newFiles)-1]
+ newBase := last.Base() + last.Size() + 1
+ if ptr.base < newBase {
+ ptr.base = newBase
+ }
+ }
+}
+
+// FileSetFor returns a new FileSet containing a sequence of new Files with
+// the same base, size, and line as the input files, for use in APIs that
+// require a FileSet.
+//
+// Precondition: the input files must be non-overlapping, and sorted in order
+// of their Base.
+func FileSetFor(files ...*token.File) *token.FileSet {
+ fset := token.NewFileSet()
+ for _, f := range files {
+ f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
+ lines := GetLines(f)
+ f2.SetLines(lines)
+ }
+ return fset
+}
+
+// CloneFileSet creates a new FileSet holding all files in fset. It does not
+// create copies of the token.Files in fset: they are added to the resulting
+// FileSet unmodified.
+func CloneFileSet(fset *token.FileSet) *token.FileSet {
+ var files []*token.File
+ fset.Iterate(func(f *token.File) bool {
+ files = append(files, f)
+ return true
+ })
+ newFileSet := token.NewFileSet()
+ AddExistingFiles(newFileSet, files)
+ return newFileSet
+}
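As a usage note for FileSetFor and GetLines: the sketch below builds a token.File in one FileSet and then derives a fresh FileSet containing an equivalent file, which is the intended use for APIs that insist on receiving a *token.FileSet. It is illustrative only, since tokeninternal is internal to golang.org/x/tools.

package main

import (
	"fmt"
	"go/token"

	"golang.org/x/tools/internal/tokeninternal" // internal; import path shown for illustration only
)

func main() {
	fset := token.NewFileSet()
	f := fset.AddFile("a.go", fset.Base(), 30)
	f.SetLinesForContent([]byte("package a\nvar X = 1\n"))

	// Build a new FileSet holding a File with the same name, base, size, and lines.
	fset2 := tokeninternal.FileSetFor(f)
	fset2.Iterate(func(g *token.File) bool {
		fmt.Println(g.Name(), g.Base(), g.Size(), g.LineCount())
		return true
	})
}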
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 0000000000..cdab988531
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,204 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that interact
+// with generic Go code, as introduced with Go 1.18.
+//
+// Many of the types and functions in this package are proxies for the new APIs
+// introduced in the standard library with Go 1.18. For example, the
+// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
+// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
+// versions older than 1.18 these helpers are implemented as stubs, allowing
+// users of this package to write code that handles generic constructs inline,
+// even if the Go version being used to compile does not support generics.
+//
+// Additionally, this package contains common utilities for working with the
+// new generic constructs, to supplement the standard library APIs. Notably,
+// the StructuralTerms API computes a minimal representation of the structural
+// restrictions on a type parameter.
+//
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
+package typeparams
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+ case *ast.IndexListExpr:
+ return e.X, e.Lbrack, e.Indices, e.Rbrack
+ }
+ return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+ switch len(indices) {
+ case 0:
+ panic("empty indices")
+ case 1:
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: indices[0],
+ Rbrack: rbrack,
+ }
+ default:
+ return &ast.IndexListExpr{
+ X: x,
+ Lbrack: lbrack,
+ Indices: indices,
+ Rbrack: rbrack,
+ }
+ }
+}
+
+// IsTypeParam reports whether t is a type parameter.
+func IsTypeParam(t types.Type) bool {
+ _, ok := t.(*types.TypeParam)
+ return ok
+}
+
+// OriginMethod returns the origin method associated with the method fn.
+// For methods on a non-generic receiver base type, this is just
+// fn. However, for methods with a generic receiver, OriginMethod returns the
+// corresponding method in the method set of the origin type.
+//
+// As a special case, if fn is not a method (has no receiver), OriginMethod
+// returns fn.
+func OriginMethod(fn *types.Func) *types.Func {
+ recv := fn.Type().(*types.Signature).Recv()
+ if recv == nil {
+ return fn
+ }
+ base := recv.Type()
+ p, isPtr := base.(*types.Pointer)
+ if isPtr {
+ base = p.Elem()
+ }
+ named, isNamed := base.(*types.Named)
+ if !isNamed {
+ // Receiver is a *types.Interface.
+ return fn
+ }
+ if named.TypeParams().Len() == 0 {
+ // Receiver base has no type parameters, so we can avoid the lookup below.
+ return fn
+ }
+ orig := named.Origin()
+ gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
+
+ // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In:
+ // package p
+ // type T *int
+ // func (*T) f() {}
+ // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}.
+ // Here we make them consistent by force.
+ // (The go/types bug is general, but this workaround is reached only
+ // for generic T thanks to the early return above.)
+ if gfn == nil {
+ mset := types.NewMethodSet(types.NewPointer(orig))
+ for i := 0; i < mset.Len(); i++ {
+ m := mset.At(i)
+ if m.Obj().Id() == fn.Id() {
+ gfn = m.Obj()
+ break
+ }
+ }
+ }
+
+ // In golang/go#61196, we observe another crash, this time inexplicable.
+ if gfn == nil {
+ panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods()))
+ }
+
+ return gfn.(*types.Func)
+}
+
+// GenericAssignableTo is a generalization of types.AssignableTo that
+// implements the following rule for uninstantiated generic types:
+//
+// If V and T are generic named types, then V is considered assignable to T if,
+// for every possible instantiation of V[A_1, ..., A_N], the instantiation
+// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
+//
+// If T has structural constraints, they must be satisfied by V.
+//
+// For example, consider the following type declarations:
+//
+// type Interface[T any] interface {
+// Accept(T)
+// }
+//
+// type Container[T any] struct {
+// Element T
+// }
+//
+// func (c Container[T]) Accept(t T) { c.Element = t }
+//
+// In this case, GenericAssignableTo reports that instantiations of Container
+// are assignable to the corresponding instantiation of Interface.
+func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
+ // If V and T are not both named, or do not have matching non-empty type
+ // parameter lists, fall back on types.AssignableTo.
+
+ VN, Vnamed := V.(*types.Named)
+ TN, Tnamed := T.(*types.Named)
+ if !Vnamed || !Tnamed {
+ return types.AssignableTo(V, T)
+ }
+
+ vtparams := VN.TypeParams()
+ ttparams := TN.TypeParams()
+ if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
+ return types.AssignableTo(V, T)
+ }
+
+ // V and T have the same (non-zero) number of type params. Instantiate both
+ // with the type parameters of V. This must always succeed for V, and will
+ // succeed for T if and only if the type set of each type parameter of V is a
+ // subset of the type set of the corresponding type parameter of T, meaning
+ // that every instantiation of V corresponds to a valid instantiation of T.
+
+ // Minor optimization: ensure we share a context across the two
+ // instantiations below.
+ if ctxt == nil {
+ ctxt = types.NewContext()
+ }
+
+ var targs []types.Type
+ for i := 0; i < vtparams.Len(); i++ {
+ targs = append(targs, vtparams.At(i))
+ }
+
+ vinst, err := types.Instantiate(ctxt, V, targs, true)
+ if err != nil {
+ panic("type parameters should satisfy their own constraints")
+ }
+
+ tinst, err := types.Instantiate(ctxt, T, targs, true)
+ if err != nil {
+ return false
+ }
+
+ return types.AssignableTo(vinst, tinst)
+}
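For UnpackIndexExpr and PackIndexExpr, the following hedged sketch shows the round trip on a parsed generic instantiation expression. It assumes a go1.18+ toolchain (so that multiple indices parse as an *ast.IndexListExpr) and, like the other snippets here, treats the internal import path as illustrative only.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"

	"golang.org/x/tools/internal/typeparams" // internal; import path shown for illustration only
)

func main() {
	expr, err := parser.ParseExpr("m[int, string]")
	if err != nil {
		panic(err)
	}

	// Unpack the indexed expression and its index expressions.
	x, lbrack, indices, rbrack := typeparams.UnpackIndexExpr(expr)
	fmt.Println(x.(*ast.Ident).Name, len(indices)) // m 2

	// Repack; with two indices this yields an *ast.IndexListExpr.
	repacked := typeparams.PackIndexExpr(x, lbrack, indices, rbrack)
	fmt.Printf("%T\n", repacked)
}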
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 0000000000..7ea8840eab
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,122 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types,
+ }
+
+ terms, err := _NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) -> empty type set of interface.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ }
+ // https://go.dev/ref/spec#Core_types
+ // "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+ // <-chan E depending on the direction of the directional channels present."
+ for chans := identical; chans < len(terms); chans++ {
+ curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+ if !ok {
+ return nil
+ }
+ if !types.Identical(ch.Elem(), curr.Elem()) {
+ return nil // channel elements are not identical.
+ }
+ if ch.Dir() == types.SendRecv {
+ // ch is bidirectional. We can safely always use curr's direction.
+ ch = curr
+ } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+ // ch and curr are not bidirectional and not the same direction.
+ return nil
+ }
+ }
+ return ch
+}
+
+// _NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// _NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, _NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, _NormalTerms returns ErrEmptyTypeSet.
+//
+// _NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func _NormalTerms(typ types.Type) ([]*types.Term, error) {
+ switch typ := typ.(type) {
+ case *types.TypeParam:
+ return StructuralTerms(typ)
+ case *types.Union:
+ return UnionTermSet(typ)
+ case *types.Interface:
+ return InterfaceTermSet(typ)
+ default:
+ return []*types.Term{types.NewTerm(false, typ)}, nil
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 0000000000..93c80fdc96
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "errors"
+ "fmt"
+ "go/types"
+ "os"
+ "strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+// type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
+ constraint := tparam.Constraint()
+ if constraint == nil {
+ return nil, fmt.Errorf("%s has nil constraint", tparam)
+ }
+ iface, _ := constraint.Underlying().(*types.Interface)
+ if iface == nil {
+ return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+ }
+ return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
+ return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
+ return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
+ tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+ if err != nil {
+ return nil, err
+ }
+ if tset.terms.isEmpty() {
+ return nil, ErrEmptyTypeSet
+ }
+ if tset.terms.isAll() {
+ return nil, nil
+ }
+ var terms []*types.Term
+ for _, term := range tset.terms {
+ terms = append(terms, types.NewTerm(term.tilde, term.typ))
+ }
+ return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+ complete bool
+ terms termlist
+}
+
+func indentf(depth int, format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+ if t == nil {
+ panic("nil type")
+ }
+
+ if debug {
+ indentf(depth, "%s", t.String())
+ defer func() {
+ if err != nil {
+ indentf(depth, "=> %s", err)
+ } else {
+ indentf(depth, "=> %s", res.terms.String())
+ }
+ }()
+ }
+
+ const maxTermCount = 100
+ if tset, ok := seen[t]; ok {
+ if !tset.complete {
+ return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+ }
+ return tset, nil
+ }
+
+ // Mark the current type as seen to avoid infinite recursion.
+ tset := new(termSet)
+ defer func() {
+ tset.complete = true
+ }()
+ seen[t] = tset
+
+ switch u := t.Underlying().(type) {
+ case *types.Interface:
+ // The term set of an interface is the intersection of the term sets of its
+ // embedded types.
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*types.TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *types.Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *types.TypeParam, *types.Union:
+ // A stand-alone type parameter or union is not permitted as union
+ // term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *types.TypeParam:
+ panic("unreachable")
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
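To see StructuralTerms in action on the worked example from its doc comment, the hedged sketch below type-checks a small package (requires go1.18+) and prints the normalized terms of the type parameter P. Term order is not guaranteed; with the constraint interface{ A|B; C } the expected terms are ~string and int. The internal import path is, as elsewhere, shown only for illustration.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams" // internal; import path shown for illustration only
)

const src = `package p

type A interface{ ~string | ~[]byte }
type B interface{ int | string }
type C interface{ ~string | ~int }

func F[P interface{ A | B; C }](p P) {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	sig := pkg.Scope().Lookup("F").Type().(*types.Signature)
	tparam := sig.TypeParams().At(0)

	terms, err := typeparams.StructuralTerms(tparam)
	if err != nil {
		panic(err)
	}
	for _, term := range terms {
		fmt.Println(term) // expected: ~string and int, in some order
	}
}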
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 0000000000..cbd12f8013
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "bytes"
+ "go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... ∪ tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" | ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset of yl
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 0000000000..7350bb702a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,169 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
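
The ~/exact distinction above is easiest to see against real go/types values. The following is a hedged sketch (a hypothetical file, not part of this patch): it copies the includes logic with types.Type.Underlying standing in for the vendored under helper, which additionally handles type parameters.

    package main

    import (
    	"fmt"
    	"go/types"
    )

    // demoTerm mirrors the vendored term type for illustration only.
    type demoTerm struct {
    	tilde bool
    	typ   types.Type
    }

    // includes reports t ∈ x, comparing under(t) when x is an approximation term.
    func (x demoTerm) includes(t types.Type) bool {
    	u := t
    	if x.tilde {
    		u = u.Underlying()
    	}
    	return types.Identical(x.typ, u)
    }

    func main() {
    	// MyInt is a defined type whose underlying type is int.
    	myInt := types.NewNamed(
    		types.NewTypeName(0, nil, "MyInt", nil),
    		types.Typ[types.Int],
    		nil,
    	)

    	exact := demoTerm{tilde: false, typ: types.Typ[types.Int]} // the set {int}
    	approx := demoTerm{tilde: true, typ: types.Typ[types.Int]} // the set ~int

    	fmt.Println(exact.includes(myInt))  // false: MyInt is not identical to int
    	fmt.Println(approx.includes(myInt)) // true: under(MyInt) == int
    }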
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
new file mode 100644
index 0000000000..07484073a5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -0,0 +1,1560 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+//go:generate stringer -type=ErrorCode
+
+type ErrorCode int
+
+// This file defines the error codes that can be produced during type-checking.
+// Collectively, these codes provide an identifier that may be used to
+// implement special handling for certain types of errors.
+//
+// Error codes should be fine-grained enough that the exact nature of the error
+// can be easily determined, but coarse enough that they are not an
+// implementation detail of the type checking algorithm. As a rule-of-thumb,
+// errors should be considered equivalent if there is a theoretical refactoring
+// of the type checker in which they are emitted in exactly one place. For
+// example, the type checker emits different error messages for "too many
+// arguments" and "too few arguments", but one can imagine an alternative type
+// checker where this check instead just emits a single "wrong number of
+// arguments", so these errors should have the same code.
+//
+// Error code names should be as brief as possible while retaining accuracy and
+// distinctiveness. In most cases names should start with an adjective
+// describing the nature of the error (e.g. "invalid", "unused", "misplaced"),
+// and end with a noun identifying the relevant language object. For example,
+// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the
+// convention that "bad" implies a problem with syntax, and "invalid" implies a
+// problem with types.
+
+const (
+ // InvalidSyntaxTree occurs if an invalid syntax tree is provided
+ // to the type checker. It should never happen.
+ InvalidSyntaxTree ErrorCode = -1
+)
+
+const (
+ _ ErrorCode = iota
+
+ // Test is reserved for errors that only apply while in self-test mode.
+ Test
+
+ /* package names */
+
+ // BlankPkgName occurs when a package name is the blank identifier "_".
+ //
+ // Per the spec:
+ // "The PackageName must not be the blank identifier."
+ BlankPkgName
+
+ // MismatchedPkgName occurs when a file's package name doesn't match the
+ // package name already established by other files.
+ MismatchedPkgName
+
+ // InvalidPkgUse occurs when a package identifier is used outside of a
+ // selector expression.
+ //
+ // Example:
+ // import "fmt"
+ //
+ // var _ = fmt
+ InvalidPkgUse
+
+ /* imports */
+
+ // BadImportPath occurs when an import path is not valid.
+ BadImportPath
+
+ // BrokenImport occurs when importing a package fails.
+ //
+ // Example:
+ // import "amissingpackage"
+ BrokenImport
+
+ // ImportCRenamed occurs when the special import "C" is renamed. "C" is a
+ // pseudo-package, and must not be renamed.
+ //
+ // Example:
+ // import _ "C"
+ ImportCRenamed
+
+ // UnusedImport occurs when an import is unused.
+ //
+ // Example:
+ // import "fmt"
+ //
+ // func main() {}
+ UnusedImport
+
+ /* initialization */
+
+ // InvalidInitCycle occurs when an invalid cycle is detected within the
+ // initialization graph.
+ //
+ // Example:
+ // var x int = f()
+ //
+ // func f() int { return x }
+ InvalidInitCycle
+
+ /* decls */
+
+ // DuplicateDecl occurs when an identifier is declared multiple times.
+ //
+ // Example:
+ // var x = 1
+ // var x = 2
+ DuplicateDecl
+
+ // InvalidDeclCycle occurs when a declaration cycle is not valid.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct {
+ // a [n]int
+ // }
+ //
+ // var n = unsafe.Sizeof(T{})
+ InvalidDeclCycle
+
+ // InvalidTypeCycle occurs when a cycle in type definitions results in a
+ // type that is not well-defined.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T [unsafe.Sizeof(T{})]int
+ InvalidTypeCycle
+
+ /* decls > const */
+
+ // InvalidConstInit occurs when a const declaration has a non-constant
+ // initializer.
+ //
+ // Example:
+ // var x int
+ // const _ = x
+ InvalidConstInit
+
+ // InvalidConstVal occurs when a const value cannot be converted to its
+ // target type.
+ //
+ // TODO(findleyr): this error code and example are not very clear. Consider
+ // removing it.
+ //
+ // Example:
+ // const _ = 1 << "hello"
+ InvalidConstVal
+
+ // InvalidConstType occurs when the underlying type in a const declaration
+ // is not a valid constant type.
+ //
+ // Example:
+ // const c *int = 4
+ InvalidConstType
+
+ /* decls > var (+ other variable assignment codes) */
+
+ // UntypedNilUse occurs when the predeclared (untyped) value nil is used to
+ // initialize a variable declared without an explicit type.
+ //
+ // Example:
+ // var x = nil
+ UntypedNilUse
+
+ // WrongAssignCount occurs when the number of values on the right-hand side
+	// of an assignment or initialization expression does not match the number
+ // of variables on the left-hand side.
+ //
+ // Example:
+ // var x = 1, 2
+ WrongAssignCount
+
+ // UnassignableOperand occurs when the left-hand side of an assignment is
+ // not assignable.
+ //
+ // Example:
+ // func f() {
+ // const c = 1
+ // c = 2
+ // }
+ UnassignableOperand
+
+ // NoNewVar occurs when a short variable declaration (':=') does not declare
+ // new variables.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // x := 2
+ // }
+ NoNewVar
+
+ // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
+ // not have single-valued left-hand or right-hand side.
+ //
+ // Per the spec:
+ // "In assignment operations, both the left- and right-hand expression lists
+ // must contain exactly one single-valued expression"
+ //
+ // Example:
+ // func f() int {
+ // x, y := 1, 2
+ // x, y += 1
+ // return x + y
+ // }
+ MultiValAssignOp
+
+ // InvalidIfaceAssign occurs when a value of type T is used as an
+ // interface, but T does not implement a method of the expected interface.
+ //
+ // Example:
+ // type I interface {
+ // f()
+ // }
+ //
+ // type T int
+ //
+ // var x I = T(1)
+ InvalidIfaceAssign
+
+ // InvalidChanAssign occurs when a chan assignment is invalid.
+ //
+ // Per the spec, a value x is assignable to a channel type T if:
+ // "x is a bidirectional channel value, T is a channel type, x's type V and
+ // T have identical element types, and at least one of V or T is not a
+ // defined type."
+ //
+ // Example:
+ // type T1 chan int
+ // type T2 chan int
+ //
+ // var x T1
+ // // Invalid assignment because both types are named
+ // var _ T2 = x
+ InvalidChanAssign
+
+ // IncompatibleAssign occurs when the type of the right-hand side expression
+ // in an assignment cannot be assigned to the type of the variable being
+ // assigned.
+ //
+ // Example:
+ // var x []int
+ // var _ int = x
+ IncompatibleAssign
+
+ // UnaddressableFieldAssign occurs when trying to assign to a struct field
+ // in a map value.
+ //
+ // Example:
+ // func f() {
+ // m := make(map[string]struct{i int})
+ // m["foo"].i = 42
+ // }
+ UnaddressableFieldAssign
+
+ /* decls > type (+ other type expression codes) */
+
+ // NotAType occurs when the identifier used as the underlying type in a type
+ // declaration or the right-hand side of a type alias does not denote a type.
+ //
+ // Example:
+ // var S = 2
+ //
+ // type T S
+ NotAType
+
+ // InvalidArrayLen occurs when an array length is not a constant value.
+ //
+ // Example:
+ // var n = 3
+ // var _ = [n]int{}
+ InvalidArrayLen
+
+ // BlankIfaceMethod occurs when a method name is '_'.
+ //
+ // Per the spec:
+ // "The name of each explicitly specified method must be unique and not
+ // blank."
+ //
+ // Example:
+ // type T interface {
+ // _(int)
+ // }
+ BlankIfaceMethod
+
+ // IncomparableMapKey occurs when a map key type does not support the == and
+ // != operators.
+ //
+ // Per the spec:
+ // "The comparison operators == and != must be fully defined for operands of
+ // the key type; thus the key type must not be a function, map, or slice."
+ //
+ // Example:
+ // var x map[T]int
+ //
+ // type T []int
+ IncomparableMapKey
+
+ // InvalidIfaceEmbed occurs when a non-interface type is embedded in an
+ // interface.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (T) m()
+ //
+ // type I interface {
+ // T
+ // }
+ InvalidIfaceEmbed
+
+ // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
+	// and T itself is a pointer, an unsafe.Pointer, or an interface.
+ //
+ // Per the spec:
+ // "An embedded field must be specified as a type name T or as a pointer to
+ // a non-interface type name *T, and T itself may not be a pointer type."
+ //
+ // Example:
+ // type T *int
+ //
+ // type S struct {
+ // *T
+ // }
+ InvalidPtrEmbed
+
+ /* decls > func and method */
+
+ // BadRecv occurs when a method declaration does not have exactly one
+ // receiver parameter.
+ //
+ // Example:
+ // func () _() {}
+ BadRecv
+
+ // InvalidRecv occurs when a receiver type expression is not of the form T
+ // or *T, or T is a pointer type.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (**T) m() {}
+ InvalidRecv
+
+ // DuplicateFieldAndMethod occurs when an identifier appears as both a field
+ // and method name.
+ //
+ // Example:
+ // type T struct {
+ // m int
+ // }
+ //
+ // func (T) m() {}
+ DuplicateFieldAndMethod
+
+ // DuplicateMethod occurs when two methods on the same receiver type have
+ // the same name.
+ //
+ // Example:
+ // type T struct {}
+ // func (T) m() {}
+ // func (T) m(i int) int { return i }
+ DuplicateMethod
+
+ /* decls > special */
+
+ // InvalidBlank occurs when a blank identifier is used as a value or type.
+ //
+ // Per the spec:
+ // "The blank identifier may appear as an operand only on the left-hand side
+ // of an assignment."
+ //
+ // Example:
+ // var x = _
+ InvalidBlank
+
+ // InvalidIota occurs when the predeclared identifier iota is used outside
+ // of a constant declaration.
+ //
+ // Example:
+ // var x = iota
+ InvalidIota
+
+ // MissingInitBody occurs when an init function is missing its body.
+ //
+ // Example:
+ // func init()
+ MissingInitBody
+
+ // InvalidInitSig occurs when an init function declares parameters or
+ // results.
+ //
+ // Example:
+ // func init() int { return 1 }
+ InvalidInitSig
+
+ // InvalidInitDecl occurs when init is declared as anything other than a
+ // function.
+ //
+ // Example:
+ // var init = 1
+ InvalidInitDecl
+
+ // InvalidMainDecl occurs when main is declared as anything other than a
+ // function, in a main package.
+ InvalidMainDecl
+
+ /* exprs */
+
+ // TooManyValues occurs when a function returns too many values for the
+ // expression context in which it is used.
+ //
+ // Example:
+ // func ReturnTwo() (int, int) {
+ // return 1, 2
+ // }
+ //
+ // var x = ReturnTwo()
+ TooManyValues
+
+ // NotAnExpr occurs when a type expression is used where a value expression
+ // is expected.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func f() {
+ // T
+ // }
+ NotAnExpr
+
+ /* exprs > const */
+
+ // TruncatedFloat occurs when a float constant is truncated to an integer
+ // value.
+ //
+ // Example:
+ // var _ int = 98.6
+ TruncatedFloat
+
+ // NumericOverflow occurs when a numeric constant overflows its target type.
+ //
+ // Example:
+ // var x int8 = 1000
+ NumericOverflow
+
+ /* exprs > operation */
+
+ // UndefinedOp occurs when an operator is not defined for the type(s) used
+ // in an operation.
+ //
+ // Example:
+ // var c = "a" - "b"
+ UndefinedOp
+
+ // MismatchedTypes occurs when operand types are incompatible in a binary
+ // operation.
+ //
+ // Example:
+ // var a = "hello"
+ // var b = 1
+ // var c = a - b
+ MismatchedTypes
+
+ // DivByZero occurs when a division operation is provable at compile
+ // time to be a division by zero.
+ //
+ // Example:
+ // const divisor = 0
+ // var x int = 1/divisor
+ DivByZero
+
+ // NonNumericIncDec occurs when an increment or decrement operator is
+ // applied to a non-numeric value.
+ //
+ // Example:
+ // func f() {
+ // var c = "c"
+ // c++
+ // }
+ NonNumericIncDec
+
+ /* exprs > ptr */
+
+ // UnaddressableOperand occurs when the & operator is applied to an
+ // unaddressable expression.
+ //
+ // Example:
+ // var x = &1
+ UnaddressableOperand
+
+ // InvalidIndirection occurs when a non-pointer value is indirected via the
+ // '*' operator.
+ //
+ // Example:
+ // var x int
+ // var y = *x
+ InvalidIndirection
+
+ /* exprs > [] */
+
+ // NonIndexableOperand occurs when an index operation is applied to a value
+ // that cannot be indexed.
+ //
+ // Example:
+ // var x = 1
+ // var y = x[1]
+ NonIndexableOperand
+
+ // InvalidIndex occurs when an index argument is not of integer type,
+ // negative, or out-of-bounds.
+ //
+ // Example:
+ // var s = [...]int{1,2,3}
+ // var x = s[5]
+ //
+ // Example:
+ // var s = []int{1,2,3}
+ // var _ = s[-1]
+ //
+ // Example:
+ // var s = []int{1,2,3}
+ // var i string
+ // var _ = s[i]
+ InvalidIndex
+
+ // SwappedSliceIndices occurs when constant indices in a slice expression
+ // are decreasing in value.
+ //
+ // Example:
+ // var _ = []int{1,2,3}[2:1]
+ SwappedSliceIndices
+
+ /* operators > slice */
+
+ // NonSliceableOperand occurs when a slice operation is applied to a value
+ // whose type is not sliceable, or is unaddressable.
+ //
+ // Example:
+ // var x = [...]int{1, 2, 3}[:1]
+ //
+ // Example:
+ // var x = 1
+ // var y = 1[:1]
+ NonSliceableOperand
+
+ // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is
+ // applied to a string.
+ //
+ // Example:
+ // var s = "hello"
+ // var x = s[1:2:3]
+ InvalidSliceExpr
+
+ /* exprs > shift */
+
+ // InvalidShiftCount occurs when the right-hand side of a shift operation is
+ // either non-integer, negative, or too large.
+ //
+ // Example:
+ // var (
+ // x string
+ // y int = 1 << x
+ // )
+ InvalidShiftCount
+
+ // InvalidShiftOperand occurs when the shifted operand is not an integer.
+ //
+ // Example:
+ // var s = "hello"
+ // var x = s << 2
+ InvalidShiftOperand
+
+ /* exprs > chan */
+
+ // InvalidReceive occurs when there is a channel receive from a value that
+ // is either not a channel, or is a send-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x = 1
+ // <-x
+ // }
+ InvalidReceive
+
+ // InvalidSend occurs when there is a channel send to a value that is not a
+ // channel, or is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x = 1
+ // x <- "hello!"
+ // }
+ InvalidSend
+
+ /* exprs > literal */
+
+ // DuplicateLitKey occurs when an index is duplicated in a slice, array, or
+ // map literal.
+ //
+ // Example:
+ // var _ = []int{0:1, 0:2}
+ //
+ // Example:
+ // var _ = map[string]int{"a": 1, "a": 2}
+ DuplicateLitKey
+
+ // MissingLitKey occurs when a map literal is missing a key expression.
+ //
+ // Example:
+ // var _ = map[string]int{1}
+ MissingLitKey
+
+ // InvalidLitIndex occurs when the key in a key-value element of a slice or
+ // array literal is not an integer constant.
+ //
+ // Example:
+ // var i = 0
+ // var x = []string{i: "world"}
+ InvalidLitIndex
+
+ // OversizeArrayLit occurs when an array literal exceeds its length.
+ //
+ // Example:
+ // var _ = [2]int{1,2,3}
+ OversizeArrayLit
+
+ // MixedStructLit occurs when a struct literal contains a mix of positional
+ // and named elements.
+ //
+ // Example:
+ // var _ = struct{i, j int}{i: 1, 2}
+ MixedStructLit
+
+ // InvalidStructLit occurs when a positional struct literal has an incorrect
+ // number of values.
+ //
+ // Example:
+ // var _ = struct{i, j int}{1,2,3}
+ InvalidStructLit
+
+ // MissingLitField occurs when a struct literal refers to a field that does
+ // not exist on the struct type.
+ //
+ // Example:
+ // var _ = struct{i int}{j: 2}
+ MissingLitField
+
+ // DuplicateLitField occurs when a struct literal contains duplicated
+ // fields.
+ //
+ // Example:
+ // var _ = struct{i int}{i: 1, i: 2}
+ DuplicateLitField
+
+ // UnexportedLitField occurs when a positional struct literal implicitly
+ // assigns an unexported field of an imported type.
+ UnexportedLitField
+
+ // InvalidLitField occurs when a field name is not a valid identifier.
+ //
+ // Example:
+ // var _ = struct{i int}{1: 1}
+ InvalidLitField
+
+ // UntypedLit occurs when a composite literal omits a required type
+ // identifier.
+ //
+ // Example:
+ // type outer struct{
+ // inner struct { i int }
+ // }
+ //
+ // var _ = outer{inner: {1}}
+ UntypedLit
+
+ // InvalidLit occurs when a composite literal expression does not match its
+ // type.
+ //
+ // Example:
+ // type P *struct{
+ // x int
+ // }
+ // var _ = P {}
+ InvalidLit
+
+ /* exprs > selector */
+
+ // AmbiguousSelector occurs when a selector is ambiguous.
+ //
+ // Example:
+ // type E1 struct { i int }
+ // type E2 struct { i int }
+ // type T struct { E1; E2 }
+ //
+ // var x T
+ // var _ = x.i
+ AmbiguousSelector
+
+ // UndeclaredImportedName occurs when a package-qualified identifier is
+ // undeclared by the imported package.
+ //
+ // Example:
+ // import "go/types"
+ //
+ // var _ = types.NotAnActualIdentifier
+ UndeclaredImportedName
+
+ // UnexportedName occurs when a selector refers to an unexported identifier
+ // of an imported package.
+ //
+ // Example:
+ // import "reflect"
+ //
+ // type _ reflect.flag
+ UnexportedName
+
+ // UndeclaredName occurs when an identifier is not declared in the current
+ // scope.
+ //
+ // Example:
+ // var x T
+ UndeclaredName
+
+ // MissingFieldOrMethod occurs when a selector references a field or method
+ // that does not exist.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // var x = T{}.f
+ MissingFieldOrMethod
+
+ /* exprs > ... */
+
+ // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
+ // not valid.
+ //
+ // Example:
+ // var _ = map[int][...]int{0: {}}
+ BadDotDotDotSyntax
+
+ // NonVariadicDotDotDot occurs when a "..." is used on the final argument to
+ // a non-variadic function.
+ //
+ // Example:
+ // func printArgs(s []string) {
+ // for _, a := range s {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // s := []string{"a", "b", "c"}
+ // printArgs(s...)
+ // }
+ NonVariadicDotDotDot
+
+ // MisplacedDotDotDot occurs when a "..." is used somewhere other than the
+ // final argument to a function call.
+ //
+ // Example:
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // a := []int{1,2,3}
+ // printArgs(0, a...)
+ // }
+ MisplacedDotDotDot
+
+ // InvalidDotDotDotOperand occurs when a "..." operator is applied to a
+ // single-valued operand.
+ //
+ // Example:
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // a := 1
+ // printArgs(a...)
+ // }
+ //
+ // Example:
+ // func args() (int, int) {
+ // return 1, 2
+ // }
+ //
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func g() {
+ // printArgs(args()...)
+ // }
+ InvalidDotDotDotOperand
+
+ // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
+ // function.
+ //
+ // Example:
+ // var s = []int{1, 2, 3}
+ // var l = len(s...)
+ InvalidDotDotDot
+
+ /* exprs > built-in */
+
+ // UncalledBuiltin occurs when a built-in function is used as a
+ // function-valued expression, instead of being called.
+ //
+ // Per the spec:
+ // "The built-in functions do not have standard Go types, so they can only
+ // appear in call expressions; they cannot be used as function values."
+ //
+ // Example:
+ // var _ = copy
+ UncalledBuiltin
+
+ // InvalidAppend occurs when append is called with a first argument that is
+ // not a slice.
+ //
+ // Example:
+ // var _ = append(1, 2)
+ InvalidAppend
+
+ // InvalidCap occurs when an argument to the cap built-in function is not of
+ // supported type.
+ //
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = cap(s)
+ InvalidCap
+
+ // InvalidClose occurs when close(...) is called with an argument that is
+ // not of channel type, or that is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x int
+ // close(x)
+ // }
+ InvalidClose
+
+ // InvalidCopy occurs when the arguments are not of slice type or do not
+ // have compatible type.
+ //
+	// See https://golang.org/ref/spec#Appending_and_copying_slices for more
+ // information on the type requirements for the copy built-in.
+ //
+ // Example:
+ // func f() {
+ // var x []int
+ // y := []int64{1,2,3}
+ // copy(x, y)
+ // }
+ InvalidCopy
+
+ // InvalidComplex occurs when the complex built-in function is called with
+ // arguments with incompatible types.
+ //
+ // Example:
+ // var _ = complex(float32(1), float64(2))
+ InvalidComplex
+
+ // InvalidDelete occurs when the delete built-in function is called with a
+ // first argument that is not a map.
+ //
+ // Example:
+ // func f() {
+ // m := "hello"
+ // delete(m, "e")
+ // }
+ InvalidDelete
+
+ // InvalidImag occurs when the imag built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = imag(int(1))
+ InvalidImag
+
+ // InvalidLen occurs when an argument to the len built-in function is not of
+ // supported type.
+ //
+	// See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = len(s)
+ InvalidLen
+
+ // SwappedMakeArgs occurs when make is called with three arguments, and its
+ // length argument is larger than its capacity argument.
+ //
+ // Example:
+ // var x = make([]int, 3, 2)
+ SwappedMakeArgs
+
+ // InvalidMake occurs when make is called with an unsupported type argument.
+ //
+	// See https://golang.org/ref/spec#Making_slices_maps_and_channels for
+ // information on the types that may be created using make.
+ //
+ // Example:
+ // var x = make(int)
+ InvalidMake
+
+ // InvalidReal occurs when the real built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = real(int(1))
+ InvalidReal
+
+ /* exprs > assertion */
+
+ // InvalidAssert occurs when a type assertion is applied to a
+ // value that is not of interface type.
+ //
+ // Example:
+ // var x = 1
+ // var _ = x.(float64)
+ InvalidAssert
+
+ // ImpossibleAssert occurs for a type assertion x.(T) when the value x of
+ // interface cannot have dynamic type T, due to a missing or mismatching
+ // method on T.
+ //
+ // Example:
+ // type T int
+ //
+ // func (t *T) m() int { return int(*t) }
+ //
+ // type I interface { m() int }
+ //
+ // var x I
+ // var _ = x.(T)
+ ImpossibleAssert
+
+ /* exprs > conversion */
+
+ // InvalidConversion occurs when the argument type cannot be converted to the
+ // target.
+ //
+ // See https://golang.org/ref/spec#Conversions for the rules of
+ // convertibility.
+ //
+ // Example:
+ // var x float64
+ // var _ = string(x)
+ InvalidConversion
+
+	// InvalidUntypedConversion occurs when there is no valid implicit
+ // conversion from an untyped value satisfying the type constraints of the
+ // context in which it is used.
+ //
+ // Example:
+ // var _ = 1 + ""
+ InvalidUntypedConversion
+
+ /* offsetof */
+
+ // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
+ // that is not a selector expression.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Offsetof(x)
+ BadOffsetofSyntax
+
+ // InvalidOffsetof occurs when unsafe.Offsetof is called with a method
+ // selector, rather than a field selector, or when the field is embedded via
+ // a pointer.
+ //
+ // Per the spec:
+ //
+ // "If f is an embedded field, it must be reachable without pointer
+ // indirections through fields of the struct. "
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct { f int }
+ // type S struct { *T }
+ // var s S
+ // var _ = unsafe.Offsetof(s.f)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type S struct{}
+ //
+ // func (S) m() {}
+ //
+ // var s S
+ // var _ = unsafe.Offsetof(s.m)
+ InvalidOffsetof
+
+ /* control flow > scope */
+
+ // UnusedExpr occurs when a side-effect free expression is used as a
+ // statement. Such a statement has no effect.
+ //
+ // Example:
+ // func f(i int) {
+ // i*i
+ // }
+ UnusedExpr
+
+ // UnusedVar occurs when a variable is declared but unused.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // }
+ UnusedVar
+
+ // MissingReturn occurs when a function with results is missing a return
+ // statement.
+ //
+ // Example:
+ // func f() int {}
+ MissingReturn
+
+ // WrongResultCount occurs when a return statement returns an incorrect
+ // number of values.
+ //
+ // Example:
+ // func ReturnOne() int {
+ // return 1, 2
+ // }
+ WrongResultCount
+
+ // OutOfScopeResult occurs when the name of a value implicitly returned by
+ // an empty return statement is shadowed in a nested scope.
+ //
+ // Example:
+ // func factor(n int) (i int) {
+ // for i := 2; i < n; i++ {
+ // if n%i == 0 {
+ // return
+ // }
+ // }
+ // return 0
+ // }
+ OutOfScopeResult
+
+ /* control flow > if */
+
+ // InvalidCond occurs when an if condition is not a boolean expression.
+ //
+ // Example:
+ // func checkReturn(i int) {
+ // if i {
+ // panic("non-zero return")
+ // }
+ // }
+ InvalidCond
+
+ /* control flow > for */
+
+ // InvalidPostDecl occurs when there is a declaration in a for-loop post
+ // statement.
+ //
+ // Example:
+ // func f() {
+ // for i := 0; i < 10; j := 0 {}
+ // }
+ InvalidPostDecl
+
+	// InvalidChanRange occurs when a send-only channel is used in a range
+ // expression.
+ //
+ // Example:
+ // func sum(c chan<- int) {
+ // s := 0
+ // for i := range c {
+ // s += i
+ // }
+ // }
+ InvalidChanRange
+
+ // InvalidIterVar occurs when two iteration variables are used while ranging
+ // over a channel.
+ //
+ // Example:
+ // func f(c chan int) {
+ // for k, v := range c {
+ // println(k, v)
+ // }
+ // }
+ InvalidIterVar
+
+ // InvalidRangeExpr occurs when the type of a range expression is not array,
+ // slice, string, map, or channel.
+ //
+ // Example:
+ // func f(i int) {
+ // for j := range i {
+ // println(j)
+ // }
+ // }
+ InvalidRangeExpr
+
+ /* control flow > switch */
+
+ // MisplacedBreak occurs when a break statement is not within a for, switch,
+ // or select statement of the innermost function definition.
+ //
+ // Example:
+ // func f() {
+ // break
+ // }
+ MisplacedBreak
+
+ // MisplacedContinue occurs when a continue statement is not within a for
+ // loop of the innermost function definition.
+ //
+ // Example:
+ // func sumeven(n int) int {
+ // proceed := func() {
+ // continue
+ // }
+ // sum := 0
+ // for i := 1; i <= n; i++ {
+ // if i % 2 != 0 {
+ // proceed()
+ // }
+ // sum += i
+ // }
+ // return sum
+ // }
+ MisplacedContinue
+
+ // MisplacedFallthrough occurs when a fallthrough statement is not within an
+ // expression switch.
+ //
+ // Example:
+ // func typename(i interface{}) string {
+ // switch i.(type) {
+ // case int64:
+ // fallthrough
+ // case int:
+ // return "int"
+ // }
+ // return "unsupported"
+ // }
+ MisplacedFallthrough
+
+ // DuplicateCase occurs when a type or expression switch has duplicate
+ // cases.
+ //
+ // Example:
+ // func printInt(i int) {
+ // switch i {
+ // case 1:
+ // println("one")
+ // case 1:
+ // println("One")
+ // }
+ // }
+ DuplicateCase
+
+ // DuplicateDefault occurs when a type or expression switch has multiple
+ // default clauses.
+ //
+ // Example:
+ // func printInt(i int) {
+ // switch i {
+ // case 1:
+ // println("one")
+ // default:
+ // println("One")
+ // default:
+ // println("1")
+ // }
+ // }
+ DuplicateDefault
+
+ // BadTypeKeyword occurs when a .(type) expression is used anywhere other
+ // than a type switch.
+ //
+ // Example:
+ // type I interface {
+ // m()
+ // }
+ // var t I
+ // var _ = t.(type)
+ BadTypeKeyword
+
+ // InvalidTypeSwitch occurs when .(type) is used on an expression that is
+ // not of interface type.
+ //
+ // Example:
+ // func f(i int) {
+ // switch x := i.(type) {}
+ // }
+ InvalidTypeSwitch
+
+ // InvalidExprSwitch occurs when a switch expression is not comparable.
+ //
+ // Example:
+ // func _() {
+ // var a struct{ _ func() }
+ // switch a /* ERROR cannot switch on a */ {
+ // }
+ // }
+ InvalidExprSwitch
+
+ /* control flow > select */
+
+ // InvalidSelectCase occurs when a select case is not a channel send or
+ // receive.
+ //
+ // Example:
+ // func checkChan(c <-chan int) bool {
+ // select {
+ // case c:
+ // return true
+ // default:
+ // return false
+ // }
+ // }
+ InvalidSelectCase
+
+ /* control flow > labels and jumps */
+
+ // UndeclaredLabel occurs when an undeclared label is jumped to.
+ //
+ // Example:
+ // func f() {
+ // goto L
+ // }
+ UndeclaredLabel
+
+ // DuplicateLabel occurs when a label is declared more than once.
+ //
+ // Example:
+ // func f() int {
+ // L:
+ // L:
+ // return 1
+ // }
+ DuplicateLabel
+
+ // MisplacedLabel occurs when a break or continue label is not on a for,
+ // switch, or select statement.
+ //
+ // Example:
+ // func f() {
+ // L:
+ // a := []int{1,2,3}
+ // for _, e := range a {
+ // if e > 10 {
+ // break L
+ // }
+ // println(a)
+ // }
+ // }
+ MisplacedLabel
+
+ // UnusedLabel occurs when a label is declared but not used.
+ //
+ // Example:
+ // func f() {
+ // L:
+ // }
+ UnusedLabel
+
+ // JumpOverDecl occurs when a label jumps over a variable declaration.
+ //
+ // Example:
+ // func f() int {
+ // goto L
+ // x := 2
+ // L:
+ // x++
+ // return x
+ // }
+ JumpOverDecl
+
+ // JumpIntoBlock occurs when a forward jump goes to a label inside a nested
+ // block.
+ //
+ // Example:
+ // func f(x int) {
+ // goto L
+ // if x > 0 {
+ // L:
+ // print("inside block")
+ // }
+ // }
+ JumpIntoBlock
+
+ /* control flow > calls */
+
+ // InvalidMethodExpr occurs when a pointer method is called but the argument
+ // is not addressable.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (*T) m() int { return 1 }
+ //
+ // var _ = T.m(T{})
+ InvalidMethodExpr
+
+ // WrongArgCount occurs when too few or too many arguments are passed by a
+ // function call.
+ //
+ // Example:
+ // func f(i int) {}
+ // var x = f()
+ WrongArgCount
+
+ // InvalidCall occurs when an expression is called that is not of function
+ // type.
+ //
+ // Example:
+ // var x = "x"
+ // var y = x()
+ InvalidCall
+
+ /* control flow > suspended */
+
+ // UnusedResults occurs when a restricted expression-only built-in function
+ // is suspended via go or defer. Such a suspension discards the results of
+ // these side-effect free built-in functions, and therefore is ineffectual.
+ //
+ // Example:
+ // func f(a []int) int {
+ // defer len(a)
+ // return i
+ // }
+ UnusedResults
+
+ // InvalidDefer occurs when a deferred expression is not a function call,
+ // for example if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // defer int32(i)
+ // return i
+ // }
+ InvalidDefer
+
+ // InvalidGo occurs when a go expression is not a function call, for example
+ // if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // go int32(i)
+ // return i
+ // }
+ InvalidGo
+
+ // All codes below were added in Go 1.17.
+
+ /* decl */
+
+ // BadDecl occurs when a declaration has invalid syntax.
+ BadDecl
+
+ // RepeatedDecl occurs when an identifier occurs more than once on the left
+ // hand side of a short variable declaration.
+ //
+ // Example:
+ // func _() {
+ // x, y, y := 1, 2, 3
+ // }
+ RepeatedDecl
+
+ /* unsafe */
+
+ // InvalidUnsafeAdd occurs when unsafe.Add is called with a
+ // length argument that is not of integer type.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var p unsafe.Pointer
+ // var _ = unsafe.Add(p, float64(1))
+ InvalidUnsafeAdd
+
+ // InvalidUnsafeSlice occurs when unsafe.Slice is called with a
+ // pointer argument that is not of pointer type or a length argument
+ // that is not of integer type, negative, or out of bounds.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(x, 1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, float64(1))
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, -1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, uint64(1) << 63)
+ InvalidUnsafeSlice
+
+ // All codes below were added in Go 1.18.
+
+ /* features */
+
+ // UnsupportedFeature occurs when a language feature is used that is not
+ // supported at this Go version.
+ UnsupportedFeature
+
+ /* type params */
+
+ // NotAGenericType occurs when a non-generic type is used where a generic
+ // type is expected: in type or function instantiation.
+ //
+ // Example:
+ // type T int
+ //
+ // var _ T[int]
+ NotAGenericType
+
+ // WrongTypeArgCount occurs when a type or function is instantiated with an
+	// incorrect number of type arguments, including when a generic type or
+ // function is used without instantiation.
+ //
+	// Errors involving failed type inference are assigned other error codes.
+ //
+ // Example:
+ // type T[p any] int
+ //
+ // var _ T[int, string]
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // var x = f
+ WrongTypeArgCount
+
+ // CannotInferTypeArgs occurs when type or function type argument inference
+ // fails to infer all type arguments.
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // func _() {
+ // f()
+ // }
+ //
+ // Example:
+ // type N[P, Q any] struct{}
+ //
+ // var _ N[int]
+ CannotInferTypeArgs
+
+ // InvalidTypeArg occurs when a type argument does not satisfy its
+ // corresponding type parameter constraints.
+ //
+ // Example:
+ // type T[P ~int] struct{}
+ //
+ // var _ T[string]
+ InvalidTypeArg // arguments? InferenceFailed
+
+ // InvalidInstanceCycle occurs when an invalid cycle is detected
+ // within the instantiation graph.
+ //
+ // Example:
+ // func f[T any]() { f[*T]() }
+ InvalidInstanceCycle
+
+ // InvalidUnion occurs when an embedded union or approximation element is
+ // not valid.
+ //
+ // Example:
+ // type _ interface {
+ // ~int | interface{ m() }
+ // }
+ InvalidUnion
+
+ // MisplacedConstraintIface occurs when a constraint-type interface is used
+ // outside of constraint position.
+ //
+ // Example:
+ // type I interface { ~int }
+ //
+ // var _ I
+ MisplacedConstraintIface
+
+ // InvalidMethodTypeParams occurs when methods have type parameters.
+ //
+ // It cannot be encountered with an AST parsed using go/parser.
+ InvalidMethodTypeParams
+
+ // MisplacedTypeParam occurs when a type parameter is used in a place where
+ // it is not permitted.
+ //
+ // Example:
+ // type T[P any] P
+ //
+ // Example:
+ // type T[P any] struct{ *P }
+ MisplacedTypeParam
+
+ // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with
+ // an argument that is not of slice type. It also occurs if it is used
+ // in a package compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.SliceData(x)
+ InvalidUnsafeSliceData
+
+ // InvalidUnsafeString occurs when unsafe.String is called with
+ // a length argument that is not of integer type, negative, or
+ // out of bounds. It also occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var b [10]byte
+ // var _ = unsafe.String(&b[0], -1)
+ InvalidUnsafeString
+
+ // InvalidUnsafeStringData occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ _ // not used anymore
+
+)
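
These codes classify ordinary go/types diagnostics. As a quick illustration (a hedged sketch, not part of this patch), type-checking a snippet with one variable and two initializers triggers the condition documented above as WrongAssignCount; only the message is shown here because the numeric code lives in this internal package.

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/importer"
    	"go/parser"
    	"go/token"
    	"go/types"
    )

    func main() {
    	const src = "package p\nvar x = 1, 2 // one variable, two values\n"
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "p.go", src, 0)
    	if err != nil {
    		panic(err)
    	}
    	conf := types.Config{
    		Importer: importer.Default(),
    		Error:    func(err error) { fmt.Println(err) }, // collect instead of stopping
    	}
    	_, _ = conf.Check("p", fset, []*ast.File{f}, nil)
    	// Prints an error roughly like: p.go:2: extra init expr 2
    }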
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
new file mode 100644
index 0000000000..15ecf7c5de
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
@@ -0,0 +1,179 @@
+// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT.
+
+package typesinternal
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidSyntaxTree - -1]
+ _ = x[Test-1]
+ _ = x[BlankPkgName-2]
+ _ = x[MismatchedPkgName-3]
+ _ = x[InvalidPkgUse-4]
+ _ = x[BadImportPath-5]
+ _ = x[BrokenImport-6]
+ _ = x[ImportCRenamed-7]
+ _ = x[UnusedImport-8]
+ _ = x[InvalidInitCycle-9]
+ _ = x[DuplicateDecl-10]
+ _ = x[InvalidDeclCycle-11]
+ _ = x[InvalidTypeCycle-12]
+ _ = x[InvalidConstInit-13]
+ _ = x[InvalidConstVal-14]
+ _ = x[InvalidConstType-15]
+ _ = x[UntypedNilUse-16]
+ _ = x[WrongAssignCount-17]
+ _ = x[UnassignableOperand-18]
+ _ = x[NoNewVar-19]
+ _ = x[MultiValAssignOp-20]
+ _ = x[InvalidIfaceAssign-21]
+ _ = x[InvalidChanAssign-22]
+ _ = x[IncompatibleAssign-23]
+ _ = x[UnaddressableFieldAssign-24]
+ _ = x[NotAType-25]
+ _ = x[InvalidArrayLen-26]
+ _ = x[BlankIfaceMethod-27]
+ _ = x[IncomparableMapKey-28]
+ _ = x[InvalidIfaceEmbed-29]
+ _ = x[InvalidPtrEmbed-30]
+ _ = x[BadRecv-31]
+ _ = x[InvalidRecv-32]
+ _ = x[DuplicateFieldAndMethod-33]
+ _ = x[DuplicateMethod-34]
+ _ = x[InvalidBlank-35]
+ _ = x[InvalidIota-36]
+ _ = x[MissingInitBody-37]
+ _ = x[InvalidInitSig-38]
+ _ = x[InvalidInitDecl-39]
+ _ = x[InvalidMainDecl-40]
+ _ = x[TooManyValues-41]
+ _ = x[NotAnExpr-42]
+ _ = x[TruncatedFloat-43]
+ _ = x[NumericOverflow-44]
+ _ = x[UndefinedOp-45]
+ _ = x[MismatchedTypes-46]
+ _ = x[DivByZero-47]
+ _ = x[NonNumericIncDec-48]
+ _ = x[UnaddressableOperand-49]
+ _ = x[InvalidIndirection-50]
+ _ = x[NonIndexableOperand-51]
+ _ = x[InvalidIndex-52]
+ _ = x[SwappedSliceIndices-53]
+ _ = x[NonSliceableOperand-54]
+ _ = x[InvalidSliceExpr-55]
+ _ = x[InvalidShiftCount-56]
+ _ = x[InvalidShiftOperand-57]
+ _ = x[InvalidReceive-58]
+ _ = x[InvalidSend-59]
+ _ = x[DuplicateLitKey-60]
+ _ = x[MissingLitKey-61]
+ _ = x[InvalidLitIndex-62]
+ _ = x[OversizeArrayLit-63]
+ _ = x[MixedStructLit-64]
+ _ = x[InvalidStructLit-65]
+ _ = x[MissingLitField-66]
+ _ = x[DuplicateLitField-67]
+ _ = x[UnexportedLitField-68]
+ _ = x[InvalidLitField-69]
+ _ = x[UntypedLit-70]
+ _ = x[InvalidLit-71]
+ _ = x[AmbiguousSelector-72]
+ _ = x[UndeclaredImportedName-73]
+ _ = x[UnexportedName-74]
+ _ = x[UndeclaredName-75]
+ _ = x[MissingFieldOrMethod-76]
+ _ = x[BadDotDotDotSyntax-77]
+ _ = x[NonVariadicDotDotDot-78]
+ _ = x[MisplacedDotDotDot-79]
+ _ = x[InvalidDotDotDotOperand-80]
+ _ = x[InvalidDotDotDot-81]
+ _ = x[UncalledBuiltin-82]
+ _ = x[InvalidAppend-83]
+ _ = x[InvalidCap-84]
+ _ = x[InvalidClose-85]
+ _ = x[InvalidCopy-86]
+ _ = x[InvalidComplex-87]
+ _ = x[InvalidDelete-88]
+ _ = x[InvalidImag-89]
+ _ = x[InvalidLen-90]
+ _ = x[SwappedMakeArgs-91]
+ _ = x[InvalidMake-92]
+ _ = x[InvalidReal-93]
+ _ = x[InvalidAssert-94]
+ _ = x[ImpossibleAssert-95]
+ _ = x[InvalidConversion-96]
+ _ = x[InvalidUntypedConversion-97]
+ _ = x[BadOffsetofSyntax-98]
+ _ = x[InvalidOffsetof-99]
+ _ = x[UnusedExpr-100]
+ _ = x[UnusedVar-101]
+ _ = x[MissingReturn-102]
+ _ = x[WrongResultCount-103]
+ _ = x[OutOfScopeResult-104]
+ _ = x[InvalidCond-105]
+ _ = x[InvalidPostDecl-106]
+ _ = x[InvalidChanRange-107]
+ _ = x[InvalidIterVar-108]
+ _ = x[InvalidRangeExpr-109]
+ _ = x[MisplacedBreak-110]
+ _ = x[MisplacedContinue-111]
+ _ = x[MisplacedFallthrough-112]
+ _ = x[DuplicateCase-113]
+ _ = x[DuplicateDefault-114]
+ _ = x[BadTypeKeyword-115]
+ _ = x[InvalidTypeSwitch-116]
+ _ = x[InvalidExprSwitch-117]
+ _ = x[InvalidSelectCase-118]
+ _ = x[UndeclaredLabel-119]
+ _ = x[DuplicateLabel-120]
+ _ = x[MisplacedLabel-121]
+ _ = x[UnusedLabel-122]
+ _ = x[JumpOverDecl-123]
+ _ = x[JumpIntoBlock-124]
+ _ = x[InvalidMethodExpr-125]
+ _ = x[WrongArgCount-126]
+ _ = x[InvalidCall-127]
+ _ = x[UnusedResults-128]
+ _ = x[InvalidDefer-129]
+ _ = x[InvalidGo-130]
+ _ = x[BadDecl-131]
+ _ = x[RepeatedDecl-132]
+ _ = x[InvalidUnsafeAdd-133]
+ _ = x[InvalidUnsafeSlice-134]
+ _ = x[UnsupportedFeature-135]
+ _ = x[NotAGenericType-136]
+ _ = x[WrongTypeArgCount-137]
+ _ = x[CannotInferTypeArgs-138]
+ _ = x[InvalidTypeArg-139]
+ _ = x[InvalidInstanceCycle-140]
+ _ = x[InvalidUnion-141]
+ _ = x[MisplacedConstraintIface-142]
+ _ = x[InvalidMethodTypeParams-143]
+ _ = x[MisplacedTypeParam-144]
+ _ = x[InvalidUnsafeSliceData-145]
+ _ = x[InvalidUnsafeString-146]
+}
+
+const (
+ _ErrorCode_name_0 = "InvalidSyntaxTree"
+ _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString"
+)
+
+var (
+ _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180}
+)
+
+func (i ErrorCode) String() string {
+ switch {
+ case i == -1:
+ return _ErrorCode_name_0
+ case 1 <= i && i <= 146:
+ i -= 1
+ return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
+ default:
+ return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
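
The generated String method above uses the standard stringer layout: every name is stored in one concatenated string and sliced out via an index table, avoiding a per-name []string. A minimal hedged sketch of the same pattern (a hypothetical Color type, unrelated to this patch):

    package main

    import "fmt"

    type Color int

    const (
    	Red Color = iota + 1 // values start at 1, like the ErrorCode block
    	Green
    	Blue
    )

    // One concatenated name string plus offsets into it, exactly the shape
    // stringer generates for _ErrorCode_name_1 / _ErrorCode_index_1.
    const _Color_name = "RedGreenBlue"

    var _Color_index = [...]uint8{0, 3, 8, 12}

    func (c Color) String() string {
    	if c < Red || c > Blue {
    		return "Color(" + fmt.Sprint(int(c)) + ")"
    	}
    	c -= Red // shift so Red maps to slot 0
    	return _Color_name[_Color_index[c]:_Color_index[c+1]]
    }

    func main() {
    	fmt.Println(Green)    // Green
    	fmt.Println(Color(9)) // Color(9)
    }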
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
new file mode 100644
index 0000000000..ce7d4351b2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -0,0 +1,52 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typesinternal provides access to internal go/types APIs that are not
+// yet exported.
+package typesinternal
+
+import (
+ "go/token"
+ "go/types"
+ "reflect"
+ "unsafe"
+)
+
+func SetUsesCgo(conf *types.Config) bool {
+ v := reflect.ValueOf(conf).Elem()
+
+ f := v.FieldByName("go115UsesCgo")
+ if !f.IsValid() {
+ f = v.FieldByName("UsesCgo")
+ if !f.IsValid() {
+ return false
+ }
+ }
+
+ addr := unsafe.Pointer(f.UnsafeAddr())
+ *(*bool)(addr) = true
+
+ return true
+}
+
+// ReadGo116ErrorData extracts additional information from types.Error values
+// generated by Go version 1.16 and later: the error code, start position, and
+// end position. If all positions are valid, start <= err.Pos <= end.
+//
+// If the data could not be read, the final result parameter will be false.
+func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
+ var data [3]int
+ // By coincidence all of these fields are ints, which simplifies things.
+ v := reflect.ValueOf(err)
+ for i, name := range []string{"go116code", "go116start", "go116end"} {
+ f := v.FieldByName(name)
+ if !f.IsValid() {
+ return 0, 0, 0, false
+ }
+ data[i] = int(f.Int())
+ }
+ return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
+}
+
+var SetGoVersion = func(conf *types.Config, version string) bool { return false }
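
ReadGo116ErrorData above relies on types.Error carrying unexported go116* fields. Outside this internal package the same reflection trick can be applied directly; the sketch below is hedged and hypothetical, and depends on unexported field names that may change between Go releases.

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/importer"
    	"go/parser"
    	"go/token"
    	"go/types"
    	"reflect"
    )

    // errorCodeOf reads the unexported go116code field from a types.Error,
    // mirroring ReadGo116ErrorData.
    func errorCodeOf(err types.Error) (int, bool) {
    	f := reflect.ValueOf(err).FieldByName("go116code")
    	if !f.IsValid() {
    		return 0, false
    	}
    	return int(f.Int()), true
    }

    func main() {
    	const src = "package p\nvar x int = \"hello\"\n"
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "p.go", src, 0)
    	if err != nil {
    		panic(err)
    	}
    	conf := types.Config{Importer: importer.Default()}
    	_, cerr := conf.Check("p", fset, []*ast.File{f}, nil)
    	if terr, ok := cerr.(types.Error); ok {
    		if code, ok := errorCodeOf(terr); ok {
    			fmt.Printf("%v (code %d)\n", terr, code)
    		}
    	}
    }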
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go
new file mode 100644
index 0000000000..a42b072a67
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go
@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package typesinternal
+
+import (
+ "go/types"
+)
+
+func init() {
+ SetGoVersion = func(conf *types.Config, version string) bool {
+ conf.GoVersion = version
+ return true
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go
new file mode 100644
index 0000000000..bbabcd22e9
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/gover.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a fork of internal/gover for use by x/tools until
+// go1.21 and earlier are no longer supported by x/tools.
+
+package versions
+
+import "strings"
+
+// A gover is a parsed Go version: major[.Minor[.Patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type gover struct {
+ major string // decimal
+ minor string // decimal or ""
+ patch string // decimal or ""
+ kind string // "", "alpha", "beta", "rc"
+ pre string // decimal or ""
+}
+
+// compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func compare(x, y string) int {
+ vx := parse(x)
+ vy := parse(y)
+
+ if c := cmpInt(vx.major, vy.major); c != 0 {
+ return c
+ }
+ if c := cmpInt(vx.minor, vy.minor); c != 0 {
+ return c
+ }
+ if c := cmpInt(vx.patch, vy.patch); c != 0 {
+ return c
+ }
+ if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
+ return c
+ }
+ if c := cmpInt(vx.pre, vy.pre); c != 0 {
+ return c
+ }
+ return 0
+}
+
+// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
+func lang(x string) string {
+ v := parse(x)
+ if v.minor == "" || v.major == "1" && v.minor == "0" {
+ return v.major
+ }
+ return v.major + "." + v.minor
+}
+
+// isValid reports whether the version x is valid.
+func isValid(x string) bool {
+ return parse(x) != gover{}
+}
+
+// parse parses the Go version string x into a version.
+// It returns the zero version if x is malformed.
+func parse(x string) gover {
+ var v gover
+
+ // Parse major version.
+ var ok bool
+ v.major, x, ok = cutInt(x)
+ if !ok {
+ return gover{}
+ }
+ if x == "" {
+ // Interpret "1" as "1.0.0".
+ v.minor = "0"
+ v.patch = "0"
+ return v
+ }
+
+ // Parse . before minor version.
+ if x[0] != '.' {
+ return gover{}
+ }
+
+ // Parse minor version.
+ v.minor, x, ok = cutInt(x[1:])
+ if !ok {
+ return gover{}
+ }
+ if x == "" {
+ // Patch missing is same as "0" for older versions.
+ // Starting in Go 1.21, patch missing is different from explicit .0.
+ if cmpInt(v.minor, "21") < 0 {
+ v.patch = "0"
+ }
+ return v
+ }
+
+ // Parse patch if present.
+ if x[0] == '.' {
+ v.patch, x, ok = cutInt(x[1:])
+ if !ok || x != "" {
+ // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
+ // Allowing them would be a bit confusing because we already have:
+ // 1.21 < 1.21rc1
+ // But a prerelease of a patch would have the opposite effect:
+ // 1.21.3rc1 < 1.21.3
+ // We've never needed them before, so let's not start now.
+ return gover{}
+ }
+ return v
+ }
+
+ // Parse prerelease.
+ i := 0
+ for i < len(x) && (x[i] < '0' || '9' < x[i]) {
+ if x[i] < 'a' || 'z' < x[i] {
+ return gover{}
+ }
+ i++
+ }
+ if i == 0 {
+ return gover{}
+ }
+ v.kind, x = x[:i], x[i:]
+ if x == "" {
+ return v
+ }
+ v.pre, x, ok = cutInt(x)
+ if !ok || x != "" {
+ return gover{}
+ }
+
+ return v
+}
+
+// cutInt scans the leading decimal number at the start of x to an integer
+// and returns that value and the rest of the string.
+func cutInt(x string) (n, rest string, ok bool) {
+ i := 0
+ for i < len(x) && '0' <= x[i] && x[i] <= '9' {
+ i++
+ }
+ if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
+ return "", "", false
+ }
+ return x[:i], x[i:], true
+}
+
+// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
+// (Copied from golang.org/x/mod/semver's compareInt.)
+func cmpInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
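
cmpInt above compares version components as decimal strings ("big decimal"), which is why go1.99999999999 never overflows. A hedged, standalone sketch of the same comparison rule (valid only because cutInt rejects leading zeros):

    package main

    import "fmt"

    // cmpDecimal compares two decimal strings as numbers: the longer string is
    // the larger number, and equal-length strings compare lexically digit by digit.
    func cmpDecimal(x, y string) int {
    	if x == y {
    		return 0
    	}
    	if len(x) != len(y) {
    		if len(x) < len(y) {
    			return -1
    		}
    		return +1
    	}
    	if x < y {
    		return -1
    	}
    	return +1
    }

    func main() {
    	fmt.Println(cmpDecimal("9", "10"))                        // -1
    	fmt.Println(cmpDecimal("99999999999", "100000000000000")) // -1, no overflow
    	fmt.Println(cmpDecimal("21", "21"))                       // 0
    }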
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
new file mode 100644
index 0000000000..562eef21fa
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -0,0 +1,19 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+ "go/types"
+)
+
+// GoVersion returns the Go version of the type-checked package pkg.
+// It returns the empty string if no version can be determined.
+func GoVersion(pkg *types.Package) string {
+ // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
+ if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
+ return pkg.GoVersion()
+ }
+ return ""
+}
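
On Go 1.21 and later the interface assertion above succeeds because *types.Package has a GoVersion method, populated from types.Config.GoVersion during type checking. A hedged sketch (a hypothetical file, requires go1.21+):

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"
    	"go/types"
    )

    func main() {
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "p.go", "package p\n", 0)
    	if err != nil {
    		panic(err)
    	}
    	conf := types.Config{GoVersion: "go1.20"} // the version the package is checked at
    	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(pkg.GoVersion()) // go1.20
    }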
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
new file mode 100644
index 0000000000..a7b79207ae
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package versions
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// FileVersions always reports a file's Go version as the
+// zero version at this Go version.
+func FileVersions(info *types.Info, file *ast.File) string { return "" }
+
+// InitFileVersions is a noop at this Go version.
+func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
new file mode 100644
index 0000000000..7b9ba89a82
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+// +build go1.22
+
+package versions
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// FileVersions maps a file to the file's semantic Go version.
+// The reported version is the zero version if a version cannot be determined.
+func FileVersions(info *types.Info, file *ast.File) string {
+ return info.FileVersions[file]
+}
+
+// InitFileVersions initializes info to record Go versions for Go files.
+func InitFileVersions(info *types.Info) {
+ info.FileVersions = make(map[*ast.File]string)
+}
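
Info.FileVersions is the go1.22 API these wrappers target: after InitFileVersions allocates the map, the type checker records each file's effective language version, which a per-file //go:build constraint can adjust. A hedged sketch (requires a go1.22 toolchain; the exact recorded strings may vary slightly across point releases):

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"
    	"go/types"
    )

    func main() {
    	const src = "//go:build go1.21\n\npackage p\n"
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
    	if err != nil {
    		panic(err)
    	}
    	info := &types.Info{FileVersions: make(map[*ast.File]string)} // what InitFileVersions does
    	conf := types.Config{GoVersion: "go1.22"}
    	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
    		panic(err)
    	}
    	fmt.Printf("file version: %q\n", info.FileVersions[f]) // e.g. "go1.21", from the build constraint
    }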
diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go121.go b/vendor/golang.org/x/tools/internal/versions/versions_go121.go
new file mode 100644
index 0000000000..cf4a7d0360
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/versions_go121.go
@@ -0,0 +1,49 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package versions
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example:
+//
+// Lang("go1.21rc2") = "go1.21"
+// Lang("go1.21.2") = "go1.21"
+// Lang("go1.21") = "go1.21"
+// Lang("go1") = "go1"
+// Lang("bad") = ""
+// Lang("1.21") = ""
+func Lang(x string) string {
+ v := lang(stripGo(x))
+ if v == "" {
+ return ""
+ }
+ return x[:2+len(v)] // "go"+v without allocation
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as Go versions.
+// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
+// Invalid versions, including the empty string, compare less than
+// valid versions and equal to each other.
+// The language version "go1.21" compares less than the
+// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
+// Custom toolchain suffixes are ignored during comparison:
+// "go1.21.0" and "go1.21.0-bigcorp" are equal.
+func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) }
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool { return isValid(stripGo(x)) }
+
+// stripGo converts from a "go1.21" version to a "1.21" version.
+// If v does not start with "go", stripGo returns the empty string (a known invalid version).
+func stripGo(v string) string {
+ if len(v) < 2 || v[:2] != "go" {
+ return ""
+ }
+ return v[2:]
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/versions_go122.go b/vendor/golang.org/x/tools/internal/versions/versions_go122.go
new file mode 100644
index 0000000000..c1c1814b28
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/versions_go122.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+// +build go1.22
+
+package versions
+
+import (
+ "go/version"
+)
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example:
+//
+// Lang("go1.21rc2") = "go1.21"
+// Lang("go1.21.2") = "go1.21"
+// Lang("go1.21") = "go1.21"
+// Lang("go1") = "go1"
+// Lang("bad") = ""
+// Lang("1.21") = ""
+func Lang(x string) string { return version.Lang(x) }
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as Go versions.
+// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
+// Invalid versions, including the empty string, compare less than
+// valid versions and equal to each other.
+// The language version "go1.21" compares less than the
+// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
+// Custom toolchain suffixes are ignored during comparison:
+// "go1.21.0" and "go1.21.0-bigcorp" are equal.
+func Compare(x, y string) int { return version.Compare(x, y) }
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool { return version.IsValid(x) }
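Note (editor, not part of the patch): on go1.22 the file above simply delegates to the standard library's go/version package. A short sketch of the documented semantics; the printed values follow the doc comments above:

package main

import (
	"fmt"
	"go/version"
)

func main() {
	// Lang reduces a release or prerelease to its language version.
	fmt.Println(version.Lang("go1.21rc2")) // go1.21
	fmt.Println(version.Lang("1.21"))      // "" (the "go" prefix is required)

	// The bare language version sorts before its releases.
	fmt.Println(version.Compare("go1.21", "go1.21.0")) // -1

	fmt.Println(version.IsValid("go1.21.2")) // true
	fmt.Println(version.IsValid("bad"))      // false
}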
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
index 65dcbc56dc..21166f5c7d 100644
--- a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
@@ -1,6 +1,11 @@
language: go
go:
- - 1.8
- - 1.7
- - 1.6
\ No newline at end of file
+ - tip
+ - 1.15.x
+ - 1.14.x
+ - 1.13.x
+ - 1.12.x
+
+env:
+ - GO111MODULE=on
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
index 2758ec9ced..465f569270 100644
--- a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
@@ -5,8 +5,8 @@ import (
"syscall"
)
-// os_Chown is a var so we can mock it out during tests.
-var os_Chown = os.Chown
+// osChown is a var so we can mock it out during tests.
+var osChown = os.Chown
func chown(name string, info os.FileInfo) error {
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
@@ -15,5 +15,5 @@ func chown(name string, info os.FileInfo) error {
}
f.Close()
stat := info.Sys().(*syscall.Stat_t)
- return os_Chown(name, int(stat.Uid), int(stat.Gid))
+ return osChown(name, int(stat.Uid), int(stat.Gid))
}
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
index 46d97c5531..3447cdc056 100644
--- a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
@@ -120,7 +120,7 @@ var (
currentTime = time.Now
// os_Stat exists so it can be mocked out by tests.
- os_Stat = os.Stat
+ osStat = os.Stat
// megabyte is the conversion factor between MaxSize and bytes. It is a
// variable so tests can mock it out and not need to write megabytes of data
@@ -206,14 +206,14 @@ func (l *Logger) rotate() error {
// openNew opens a new log file for writing, moving any old log file out of the
// way. This methods assumes the file has already been closed.
func (l *Logger) openNew() error {
- err := os.MkdirAll(l.dir(), 0744)
+ err := os.MkdirAll(l.dir(), 0755)
if err != nil {
return fmt.Errorf("can't make directories for new logfile: %s", err)
}
name := l.filename()
- mode := os.FileMode(0644)
- info, err := os_Stat(name)
+ mode := os.FileMode(0600)
+ info, err := osStat(name)
if err == nil {
// Copy the mode off the old logfile.
mode = info.Mode()
@@ -265,7 +265,7 @@ func (l *Logger) openExistingOrNew(writeLen int) error {
l.mill()
filename := l.filename()
- info, err := os_Stat(filename)
+ info, err := osStat(filename)
if os.IsNotExist(err) {
return l.openNew()
}
@@ -288,7 +288,7 @@ func (l *Logger) openExistingOrNew(writeLen int) error {
return nil
}
-// genFilename generates the name of the logfile from the current time.
+// filename generates the name of the logfile from the current time.
func (l *Logger) filename() string {
if l.Filename != "" {
return l.Filename
@@ -376,7 +376,7 @@ func (l *Logger) millRunOnce() error {
// millRun runs in a goroutine to manage post-rotation compression and removal
// of old log files.
func (l *Logger) millRun() {
- for _ = range l.millCh {
+ for range l.millCh {
// what am I going to do, log this?
_ = l.millRunOnce()
}
@@ -472,7 +472,7 @@ func compressLogFile(src, dst string) (err error) {
}
defer f.Close()
- fi, err := os_Stat(src)
+ fi, err := osStat(src)
if err != nil {
return fmt.Errorf("failed to stat log file: %v", err)
}
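Note (editor, not part of the patch): for context, a minimal lumberjack usage sketch; the path and limits are illustrative. With the change above, newly created log files default to mode 0600 and log directories to 0755:

package main

import (
	"log"

	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// lumberjack.Logger is an io.Writer that rotates the file it writes to.
	log.SetOutput(&lumberjack.Logger{
		Filename:   "/var/log/myapp/app.log", // illustrative path
		MaxSize:    100,                      // megabytes before rotation
		MaxBackups: 3,                        // rotated files to keep
		MaxAge:     28,                       // days to retain rotated files
		Compress:   true,                     // gzip rotated files
	})
	log.Println("rotating logger configured")
}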
diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1/generated.proto
index cdf1f47655..a8903621c8 100644
--- a/vendor/k8s.io/api/admissionregistration/v1/generated.proto
+++ b/vendor/k8s.io/api/admissionregistration/v1/generated.proto
@@ -215,7 +215,7 @@ message MutatingWebhook {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
- // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.
+ // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
@@ -473,7 +473,7 @@ message ValidatingWebhook {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
- // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.
+ // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
diff --git a/vendor/k8s.io/api/admissionregistration/v1/types.go b/vendor/k8s.io/api/admissionregistration/v1/types.go
index 74f17d54a2..07ed7a6246 100644
--- a/vendor/k8s.io/api/admissionregistration/v1/types.go
+++ b/vendor/k8s.io/api/admissionregistration/v1/types.go
@@ -320,7 +320,7 @@ type ValidatingWebhook struct {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
- // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.
+ // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
@@ -489,7 +489,7 @@ type MutatingWebhook struct {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
- // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.
+ // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
diff --git a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go
index ce306b307a..c41cceb2f2 100644
--- a/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go
@@ -50,7 +50,7 @@ var map_MutatingWebhook = map[string]string{
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.",
"reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".",
- "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.",
+ "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.",
}
func (MutatingWebhook) SwaggerDoc() map[string]string {
@@ -122,7 +122,7 @@ var map_ValidatingWebhook = map[string]string{
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.",
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.",
- "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.",
+ "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.",
}
func (ValidatingWebhook) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
index 7465350263..4f1373ec5a 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
@@ -493,6 +493,34 @@ func (m *Validation) XXX_DiscardUnknown() {
var xxx_messageInfo_Validation proto.InternalMessageInfo
+func (m *Variable) Reset() { *m = Variable{} }
+func (*Variable) ProtoMessage() {}
+func (*Variable) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c3be8d256e3ae3cf, []int{16}
+}
+func (m *Variable) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Variable) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Variable.Merge(m, src)
+}
+func (m *Variable) XXX_Size() int {
+ return m.Size()
+}
+func (m *Variable) XXX_DiscardUnknown() {
+ xxx_messageInfo_Variable.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Variable proto.InternalMessageInfo
+
func init() {
proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation")
proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning")
@@ -510,6 +538,7 @@ func init() {
proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec")
proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus")
proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Validation")
+ proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1alpha1.Variable")
}
func init() {
@@ -517,95 +546,102 @@ func init() {
}
var fileDescriptor_c3be8d256e3ae3cf = []byte{
- // 1407 bytes of a gzipped FileDescriptorProto
+ // 1509 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0x45,
- 0x18, 0xcf, 0xc6, 0x4e, 0x9a, 0x8c, 0xf3, 0xb0, 0x87, 0x56, 0x75, 0x23, 0x6a, 0x47, 0xab, 0x0a,
- 0x35, 0x12, 0xec, 0x92, 0xb4, 0x50, 0x40, 0x48, 0x28, 0xdb, 0x17, 0x7d, 0xa4, 0x89, 0xa6, 0x28,
- 0x91, 0x10, 0x95, 0x98, 0xec, 0x4e, 0xec, 0xa9, 0xbd, 0x0f, 0x76, 0xd6, 0xa1, 0x11, 0x48, 0x54,
- 0xe2, 0x02, 0x37, 0x0e, 0x5c, 0xf8, 0x5f, 0xb8, 0x70, 0xeb, 0xb1, 0xc7, 0x72, 0xc0, 0x22, 0xe6,
- 0xc2, 0x5f, 0x00, 0x52, 0x2e, 0xa0, 0x99, 0x9d, 0x7d, 0x3b, 0xc4, 0x2e, 0x81, 0x9b, 0xf7, 0x7b,
- 0xfc, 0x7e, 0xf3, 0x7d, 0xf3, 0x7d, 0x33, 0xdf, 0x18, 0xa0, 0xce, 0x3b, 0x4c, 0xa3, 0xae, 0xde,
- 0xe9, 0xed, 0x12, 0xdf, 0x21, 0x01, 0x61, 0xfa, 0x3e, 0x71, 0x2c, 0xd7, 0xd7, 0xa5, 0x02, 0x7b,
- 0x54, 0xc7, 0x96, 0x4d, 0x19, 0xa3, 0xae, 0xe3, 0x93, 0x16, 0x65, 0x81, 0x8f, 0x03, 0xea, 0x3a,
- 0xfa, 0xfe, 0x2a, 0xee, 0x7a, 0x6d, 0xbc, 0xaa, 0xb7, 0x88, 0x43, 0x7c, 0x1c, 0x10, 0x4b, 0xf3,
- 0x7c, 0x37, 0x70, 0xe1, 0x4a, 0xe8, 0xaa, 0x61, 0x8f, 0x6a, 0x43, 0x5d, 0xb5, 0xc8, 0x75, 0xe9,
- 0x8d, 0x16, 0x0d, 0xda, 0xbd, 0x5d, 0xcd, 0x74, 0x6d, 0xbd, 0xe5, 0xb6, 0x5c, 0x5d, 0x20, 0xec,
- 0xf6, 0xf6, 0xc4, 0x97, 0xf8, 0x10, 0xbf, 0x42, 0xe4, 0xa5, 0x2b, 0x23, 0x2c, 0x2a, 0xbf, 0x9c,
- 0xa5, 0xab, 0x89, 0x93, 0x8d, 0xcd, 0x36, 0x75, 0x88, 0x7f, 0xa0, 0x7b, 0x9d, 0x16, 0x17, 0x30,
- 0xdd, 0x26, 0x01, 0x1e, 0xe6, 0xa5, 0x1f, 0xe7, 0xe5, 0xf7, 0x9c, 0x80, 0xda, 0xa4, 0xe0, 0xf0,
- 0xf6, 0x49, 0x0e, 0xcc, 0x6c, 0x13, 0x1b, 0xe7, 0xfd, 0x54, 0x06, 0x16, 0xd7, 0x7b, 0x16, 0x0d,
- 0xd6, 0x1d, 0xc7, 0x0d, 0x44, 0x10, 0xf0, 0x22, 0x28, 0x75, 0xc8, 0x41, 0x5d, 0x59, 0x56, 0x2e,
- 0xcf, 0x1a, 0x95, 0x67, 0xfd, 0xe6, 0xc4, 0xa0, 0xdf, 0x2c, 0xdd, 0x23, 0x07, 0x88, 0xcb, 0xe1,
- 0x3a, 0x58, 0xdc, 0xc7, 0xdd, 0x1e, 0xb9, 0xf9, 0xc4, 0xf3, 0x89, 0x48, 0x41, 0x7d, 0x52, 0x98,
- 0x9e, 0x97, 0xa6, 0x8b, 0xdb, 0x59, 0x35, 0xca, 0xdb, 0xab, 0x5d, 0x50, 0x4b, 0xbe, 0x76, 0xb0,
- 0xef, 0x50, 0xa7, 0x05, 0x5f, 0x07, 0x33, 0x7b, 0x94, 0x74, 0x2d, 0x44, 0xf6, 0x24, 0x60, 0x55,
- 0x02, 0xce, 0xdc, 0x92, 0x72, 0x14, 0x5b, 0xc0, 0x15, 0x70, 0xe6, 0xf3, 0xd0, 0xb1, 0x5e, 0x12,
- 0xc6, 0x8b, 0xd2, 0xf8, 0x8c, 0xc4, 0x43, 0x91, 0x5e, 0xdd, 0x03, 0x0b, 0x1b, 0x38, 0x30, 0xdb,
- 0xd7, 0x5d, 0xc7, 0xa2, 0x22, 0xc2, 0x65, 0x50, 0x76, 0xb0, 0x4d, 0x64, 0x88, 0x73, 0xd2, 0xb3,
- 0xfc, 0x00, 0xdb, 0x04, 0x09, 0x0d, 0x5c, 0x03, 0x80, 0xe4, 0xe3, 0x83, 0xd2, 0x0e, 0xa4, 0x42,
- 0x4b, 0x59, 0xa9, 0x3f, 0x97, 0x25, 0x11, 0x22, 0xcc, 0xed, 0xf9, 0x26, 0x61, 0xf0, 0x09, 0xa8,
- 0x71, 0x38, 0xe6, 0x61, 0x93, 0x3c, 0x24, 0x5d, 0x62, 0x06, 0xae, 0x2f, 0x58, 0x2b, 0x6b, 0x57,
- 0xb4, 0xa4, 0x4e, 0xe3, 0x1d, 0xd3, 0xbc, 0x4e, 0x8b, 0x0b, 0x98, 0xc6, 0x0b, 0x43, 0xdb, 0x5f,
- 0xd5, 0xee, 0xe3, 0x5d, 0xd2, 0x8d, 0x5c, 0x8d, 0x73, 0x83, 0x7e, 0xb3, 0xf6, 0x20, 0x8f, 0x88,
- 0x8a, 0x24, 0xd0, 0x05, 0x0b, 0xee, 0xee, 0x63, 0x62, 0x06, 0x31, 0xed, 0xe4, 0xcb, 0xd3, 0xc2,
- 0x41, 0xbf, 0xb9, 0xb0, 0x99, 0x81, 0x43, 0x39, 0x78, 0xf8, 0x15, 0x98, 0xf7, 0x65, 0xdc, 0xa8,
- 0xd7, 0x25, 0xac, 0x5e, 0x5a, 0x2e, 0x5d, 0xae, 0xac, 0x19, 0xda, 0xc8, 0xed, 0xa8, 0xf1, 0xc0,
- 0x2c, 0xee, 0xbc, 0x43, 0x83, 0xf6, 0xa6, 0x47, 0x42, 0x3d, 0x33, 0xce, 0xc9, 0xc4, 0xcf, 0xa3,
- 0x34, 0x01, 0xca, 0xf2, 0xc1, 0xef, 0x15, 0x70, 0x96, 0x3c, 0x31, 0xbb, 0x3d, 0x8b, 0x64, 0xec,
- 0xea, 0xe5, 0x53, 0x5b, 0xc8, 0xab, 0x72, 0x21, 0x67, 0x6f, 0x0e, 0xe1, 0x41, 0x43, 0xd9, 0xe1,
- 0x0d, 0x50, 0xb1, 0x79, 0x51, 0x6c, 0xb9, 0x5d, 0x6a, 0x1e, 0xd4, 0xcf, 0x88, 0x52, 0x52, 0x07,
- 0xfd, 0x66, 0x65, 0x23, 0x11, 0x1f, 0xf5, 0x9b, 0x8b, 0xa9, 0xcf, 0x8f, 0x0e, 0x3c, 0x82, 0xd2,
- 0x6e, 0xea, 0x0b, 0x05, 0x9c, 0x3f, 0x66, 0x55, 0xf0, 0x5a, 0x92, 0x79, 0x51, 0x1a, 0x75, 0x65,
- 0xb9, 0x74, 0x79, 0xd6, 0xa8, 0xa5, 0x33, 0x26, 0x14, 0x28, 0x6b, 0x07, 0xbf, 0x56, 0x00, 0xf4,
- 0x0b, 0x78, 0xb2, 0x50, 0xae, 0x8d, 0x92, 0x2f, 0x6d, 0x48, 0x92, 0x96, 0x64, 0x92, 0x60, 0x51,
- 0x87, 0x86, 0xd0, 0xa9, 0x18, 0xcc, 0x6e, 0x61, 0x1f, 0xdb, 0xf7, 0xa8, 0x63, 0xf1, 0xbe, 0xc3,
- 0x1e, 0xdd, 0x26, 0xbe, 0xe8, 0x3b, 0x25, 0xdb, 0x77, 0xeb, 0x5b, 0x77, 0xa4, 0x06, 0xa5, 0xac,
- 0x78, 0x37, 0x77, 0xa8, 0x63, 0xc9, 0x2e, 0x8d, 0xbb, 0x99, 0xe3, 0x21, 0xa1, 0x51, 0x1f, 0x81,
- 0x19, 0x41, 0xc1, 0x0f, 0x8e, 0x93, 0x7b, 0x5f, 0x07, 0xb3, 0x71, 0x3f, 0x49, 0xd0, 0x9a, 0x34,
- 0x9b, 0x8d, 0x7b, 0x0f, 0x25, 0x36, 0xea, 0x0f, 0x0a, 0x98, 0xe3, 0x5b, 0x76, 0xbd, 0x4d, 0xcc,
- 0x0e, 0x3f, 0xca, 0xbe, 0x51, 0x00, 0x24, 0xf9, 0x03, 0x2e, 0xdc, 0x97, 0xca, 0xda, 0xfb, 0x63,
- 0x14, 0x62, 0xe1, 0x94, 0x4c, 0xb2, 0x5b, 0x50, 0x31, 0x34, 0x84, 0x53, 0xfd, 0x65, 0x12, 0x5c,
- 0xd8, 0xc6, 0x5d, 0x6a, 0xe1, 0x80, 0x3a, 0xad, 0xf5, 0x88, 0x2e, 0x2c, 0x2b, 0xf8, 0x29, 0x98,
- 0xe1, 0x1d, 0x6f, 0xe1, 0x00, 0xcb, 0x63, 0xe9, 0xcd, 0xd1, 0xce, 0x87, 0xf0, 0x30, 0xd8, 0x20,
- 0x01, 0x4e, 0xb6, 0x27, 0x91, 0xa1, 0x18, 0x15, 0x3e, 0x06, 0x65, 0xe6, 0x11, 0x53, 0x16, 0xd5,
- 0x87, 0x63, 0xc4, 0x7e, 0xec, 0xaa, 0x1f, 0x7a, 0xc4, 0x4c, 0x36, 0x8e, 0x7f, 0x21, 0xc1, 0x01,
- 0x7d, 0x30, 0xcd, 0x02, 0x1c, 0xf4, 0x98, 0xb8, 0x12, 0x2a, 0x6b, 0x77, 0x4f, 0x85, 0x4d, 0x20,
- 0x1a, 0x0b, 0x92, 0x6f, 0x3a, 0xfc, 0x46, 0x92, 0x49, 0xfd, 0x53, 0x01, 0xcb, 0xc7, 0xfa, 0x1a,
- 0xd4, 0xb1, 0x78, 0x3d, 0xfc, 0xf7, 0x69, 0xfe, 0x2c, 0x93, 0xe6, 0xcd, 0xd3, 0x08, 0x5c, 0x2e,
- 0xfe, 0xb8, 0x6c, 0xab, 0x7f, 0x28, 0xe0, 0xd2, 0x49, 0xce, 0xf7, 0x29, 0x0b, 0xe0, 0x27, 0x85,
- 0xe8, 0xb5, 0x11, 0x2f, 0x21, 0xca, 0xc2, 0xd8, 0xe3, 0x41, 0x20, 0x92, 0xa4, 0x22, 0xf7, 0xc0,
- 0x14, 0x0d, 0x88, 0xcd, 0x8f, 0x2d, 0xde, 0x5d, 0xf7, 0x4e, 0x31, 0x74, 0x63, 0x5e, 0xf2, 0x4e,
- 0xdd, 0xe1, 0x0c, 0x28, 0x24, 0x52, 0xbf, 0x2d, 0x9d, 0x1c, 0x38, 0xcf, 0x13, 0x3f, 0xcc, 0x3c,
- 0x21, 0x7c, 0x90, 0x1c, 0x38, 0xf1, 0x36, 0x6e, 0xc5, 0x1a, 0x94, 0xb2, 0x82, 0x8f, 0xc0, 0x8c,
- 0x27, 0x8f, 0xaa, 0x21, 0x37, 0xf6, 0x49, 0x11, 0x45, 0xa7, 0x9c, 0x31, 0xc7, 0xb3, 0x15, 0x7d,
- 0xa1, 0x18, 0x12, 0xf6, 0xc0, 0x82, 0x9d, 0x19, 0x51, 0x64, 0xab, 0xbc, 0x3b, 0x06, 0x49, 0x76,
- 0xc6, 0x09, 0x87, 0x83, 0xac, 0x0c, 0xe5, 0x48, 0xe0, 0x0e, 0xa8, 0xed, 0xcb, 0x8c, 0xb9, 0xce,
- 0xba, 0x19, 0xde, 0x33, 0x65, 0x71, 0x4d, 0xad, 0xf0, 0x91, 0x66, 0x3b, 0xaf, 0x3c, 0xea, 0x37,
- 0xab, 0x79, 0x21, 0x2a, 0x62, 0xa8, 0xbf, 0x2b, 0xe0, 0xe2, 0xb1, 0x7b, 0xf1, 0x3f, 0x54, 0x1f,
- 0xcd, 0x56, 0xdf, 0x8d, 0x53, 0xa9, 0xbe, 0xe1, 0x65, 0xf7, 0xe3, 0xd4, 0x3f, 0x84, 0x2a, 0xea,
- 0x0d, 0x83, 0x59, 0x2f, 0xba, 0x49, 0x65, 0xac, 0x57, 0xc7, 0x2d, 0x1e, 0xee, 0x6b, 0xcc, 0xf3,
- 0xab, 0x2e, 0xfe, 0x44, 0x09, 0x2a, 0xfc, 0x02, 0x54, 0x6d, 0x39, 0x4b, 0x73, 0x00, 0xea, 0x04,
- 0xd1, 0xbc, 0xf0, 0x2f, 0x2a, 0xe8, 0xec, 0xa0, 0xdf, 0xac, 0x6e, 0xe4, 0x60, 0x51, 0x81, 0x08,
- 0x76, 0x41, 0x25, 0xa9, 0x80, 0x68, 0xc0, 0x7c, 0xeb, 0x25, 0x52, 0xee, 0x3a, 0xc6, 0x2b, 0x32,
- 0xc7, 0x95, 0x44, 0xc6, 0x50, 0x1a, 0x1e, 0xde, 0x07, 0xf3, 0x7b, 0x98, 0x76, 0x7b, 0x3e, 0x91,
- 0xa3, 0x5b, 0x59, 0x34, 0xf0, 0x6b, 0x7c, 0xac, 0xba, 0x95, 0x56, 0x1c, 0xf5, 0x9b, 0xb5, 0x8c,
- 0x40, 0x8c, 0x6f, 0x59, 0x67, 0xf8, 0x54, 0x01, 0x55, 0x9c, 0x7d, 0x68, 0xb1, 0xfa, 0x94, 0x88,
- 0xe0, 0xbd, 0x31, 0x22, 0xc8, 0xbd, 0xd5, 0x8c, 0xba, 0x0c, 0xa3, 0x9a, 0x53, 0x30, 0x54, 0x60,
- 0x83, 0x5f, 0x82, 0x45, 0x3b, 0xf3, 0x0e, 0x62, 0xf5, 0x69, 0xb1, 0x80, 0xb1, 0xb7, 0x2e, 0x46,
- 0x48, 0xde, 0x7c, 0x59, 0x39, 0x43, 0x79, 0x2a, 0xf5, 0xa7, 0x49, 0xd0, 0x3c, 0xe1, 0x92, 0x85,
- 0x77, 0x01, 0x74, 0x77, 0x19, 0xf1, 0xf7, 0x89, 0x75, 0x3b, 0x7c, 0xa7, 0x46, 0x53, 0x60, 0x29,
- 0x19, 0x7c, 0x36, 0x0b, 0x16, 0x68, 0x88, 0x17, 0xb4, 0xc1, 0x5c, 0x90, 0x9a, 0xc9, 0xc6, 0x99,
- 0x6a, 0x65, 0xa8, 0xe9, 0x91, 0xce, 0xa8, 0x0e, 0xfa, 0xcd, 0xcc, 0x90, 0x87, 0x32, 0xf0, 0xd0,
- 0x04, 0xc0, 0x4c, 0xf2, 0x1a, 0x96, 0xa6, 0x3e, 0xda, 0x41, 0x93, 0x64, 0x33, 0xbe, 0x1c, 0x52,
- 0x89, 0x4c, 0xc1, 0xaa, 0x7f, 0x29, 0x00, 0x24, 0xf5, 0x0a, 0x2f, 0x81, 0xd4, 0x53, 0x54, 0xde,
- 0x2f, 0x65, 0x0e, 0x81, 0x52, 0x72, 0xfe, 0x52, 0xb6, 0x09, 0x63, 0xb8, 0x15, 0x0d, 0xb3, 0xf1,
- 0x4b, 0x79, 0x23, 0x14, 0xa3, 0x48, 0x0f, 0x77, 0xc0, 0xb4, 0x4f, 0x30, 0x73, 0x1d, 0xf9, 0xa6,
- 0xfe, 0x80, 0x0f, 0x3c, 0x48, 0x48, 0x8e, 0xfa, 0xcd, 0xd5, 0x51, 0xfe, 0xc9, 0xd0, 0xe4, 0x7c,
- 0x24, 0x9c, 0x90, 0x84, 0x83, 0xb7, 0x41, 0x4d, 0x72, 0xa4, 0x16, 0x1c, 0xf6, 0xd3, 0x05, 0xb9,
- 0x9a, 0xda, 0x46, 0xde, 0x00, 0x15, 0x7d, 0x8c, 0xcd, 0x67, 0x87, 0x8d, 0x89, 0xe7, 0x87, 0x8d,
- 0x89, 0x17, 0x87, 0x8d, 0x89, 0xa7, 0x83, 0x86, 0xf2, 0x6c, 0xd0, 0x50, 0x9e, 0x0f, 0x1a, 0xca,
- 0x8b, 0x41, 0x43, 0xf9, 0x75, 0xd0, 0x50, 0xbe, 0xfb, 0xad, 0x31, 0xf1, 0xf1, 0xca, 0xc8, 0xff,
- 0x1e, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x08, 0xaf, 0xaa, 0x52, 0x82, 0x12, 0x00, 0x00,
+ 0x18, 0xcf, 0xc6, 0x6e, 0x12, 0x8f, 0xf3, 0xf2, 0xd0, 0x2a, 0x6e, 0xa0, 0xde, 0x68, 0x55, 0xa1,
+ 0x46, 0x82, 0x35, 0x49, 0x0b, 0x85, 0x0a, 0x09, 0x65, 0xfb, 0xa2, 0x8f, 0x3c, 0x34, 0x45, 0x89,
+ 0x84, 0x40, 0x62, 0xb2, 0x3b, 0x71, 0xa6, 0xf6, 0x3e, 0xd8, 0x59, 0x9b, 0x46, 0x20, 0x51, 0x89,
+ 0x0b, 0xdc, 0x38, 0x70, 0xe1, 0xca, 0x9f, 0xc0, 0x7f, 0xc0, 0xad, 0xc7, 0x1e, 0xcb, 0x01, 0x8b,
+ 0x9a, 0x0b, 0x7f, 0x01, 0x48, 0xb9, 0x80, 0x66, 0x76, 0xf6, 0x69, 0x9b, 0xd8, 0x25, 0x70, 0xf3,
+ 0x7c, 0x8f, 0xdf, 0xf7, 0x98, 0xef, 0xfb, 0xf6, 0x1b, 0x03, 0xd4, 0x7c, 0x9b, 0xe9, 0xd4, 0xad,
+ 0x37, 0xdb, 0xfb, 0xc4, 0x77, 0x48, 0x40, 0x58, 0xbd, 0x43, 0x1c, 0xcb, 0xf5, 0xeb, 0x92, 0x81,
+ 0x3d, 0x5a, 0xc7, 0x96, 0x4d, 0x19, 0xa3, 0xae, 0xe3, 0x93, 0x06, 0x65, 0x81, 0x8f, 0x03, 0xea,
+ 0x3a, 0xf5, 0xce, 0x1a, 0x6e, 0x79, 0x87, 0x78, 0xad, 0xde, 0x20, 0x0e, 0xf1, 0x71, 0x40, 0x2c,
+ 0xdd, 0xf3, 0xdd, 0xc0, 0x85, 0xab, 0xa1, 0xaa, 0x8e, 0x3d, 0xaa, 0x0f, 0x54, 0xd5, 0x23, 0xd5,
+ 0xe5, 0xd7, 0x1b, 0x34, 0x38, 0x6c, 0xef, 0xeb, 0xa6, 0x6b, 0xd7, 0x1b, 0x6e, 0xc3, 0xad, 0x0b,
+ 0x84, 0xfd, 0xf6, 0x81, 0x38, 0x89, 0x83, 0xf8, 0x15, 0x22, 0x2f, 0x5f, 0x1e, 0xc1, 0xa9, 0xbc,
+ 0x3b, 0xcb, 0x57, 0x12, 0x25, 0x1b, 0x9b, 0x87, 0xd4, 0x21, 0xfe, 0x51, 0xdd, 0x6b, 0x36, 0x38,
+ 0x81, 0xd5, 0x6d, 0x12, 0xe0, 0x41, 0x5a, 0xf5, 0x61, 0x5a, 0x7e, 0xdb, 0x09, 0xa8, 0x4d, 0xfa,
+ 0x14, 0xde, 0x3a, 0x49, 0x81, 0x99, 0x87, 0xc4, 0xc6, 0x79, 0x3d, 0x8d, 0x81, 0x85, 0x8d, 0xb6,
+ 0x45, 0x83, 0x0d, 0xc7, 0x71, 0x03, 0x11, 0x04, 0xbc, 0x00, 0x0a, 0x4d, 0x72, 0x54, 0x55, 0x56,
+ 0x94, 0x4b, 0x25, 0xa3, 0xfc, 0xa4, 0xab, 0x4e, 0xf4, 0xba, 0x6a, 0xe1, 0x1e, 0x39, 0x42, 0x9c,
+ 0x0e, 0x37, 0xc0, 0x42, 0x07, 0xb7, 0xda, 0xe4, 0xe6, 0x23, 0xcf, 0x27, 0x22, 0x05, 0xd5, 0x49,
+ 0x21, 0xba, 0x24, 0x45, 0x17, 0x76, 0xb3, 0x6c, 0x94, 0x97, 0xd7, 0x5a, 0xa0, 0x92, 0x9c, 0xf6,
+ 0xb0, 0xef, 0x50, 0xa7, 0x01, 0x5f, 0x03, 0x33, 0x07, 0x94, 0xb4, 0x2c, 0x44, 0x0e, 0x24, 0xe0,
+ 0xa2, 0x04, 0x9c, 0xb9, 0x25, 0xe9, 0x28, 0x96, 0x80, 0xab, 0x60, 0xfa, 0xb3, 0x50, 0xb1, 0x5a,
+ 0x10, 0xc2, 0x0b, 0x52, 0x78, 0x5a, 0xe2, 0xa1, 0x88, 0xaf, 0x1d, 0x80, 0xf9, 0x4d, 0x1c, 0x98,
+ 0x87, 0xd7, 0x5d, 0xc7, 0xa2, 0x22, 0xc2, 0x15, 0x50, 0x74, 0xb0, 0x4d, 0x64, 0x88, 0xb3, 0x52,
+ 0xb3, 0xb8, 0x85, 0x6d, 0x82, 0x04, 0x07, 0xae, 0x03, 0x40, 0xf2, 0xf1, 0x41, 0x29, 0x07, 0x52,
+ 0xa1, 0xa5, 0xa4, 0xb4, 0x9f, 0x8b, 0xd2, 0x10, 0x22, 0xcc, 0x6d, 0xfb, 0x26, 0x61, 0xf0, 0x11,
+ 0xa8, 0x70, 0x38, 0xe6, 0x61, 0x93, 0x3c, 0x20, 0x2d, 0x62, 0x06, 0xae, 0x2f, 0xac, 0x96, 0xd7,
+ 0x2f, 0xeb, 0x49, 0x9d, 0xc6, 0x37, 0xa6, 0x7b, 0xcd, 0x06, 0x27, 0x30, 0x9d, 0x17, 0x86, 0xde,
+ 0x59, 0xd3, 0xef, 0xe3, 0x7d, 0xd2, 0x8a, 0x54, 0x8d, 0x73, 0xbd, 0xae, 0x5a, 0xd9, 0xca, 0x23,
+ 0xa2, 0x7e, 0x23, 0xd0, 0x05, 0xf3, 0xee, 0xfe, 0x43, 0x62, 0x06, 0xb1, 0xd9, 0xc9, 0x17, 0x37,
+ 0x0b, 0x7b, 0x5d, 0x75, 0x7e, 0x3b, 0x03, 0x87, 0x72, 0xf0, 0xf0, 0x4b, 0x30, 0xe7, 0xcb, 0xb8,
+ 0x51, 0xbb, 0x45, 0x58, 0xb5, 0xb0, 0x52, 0xb8, 0x54, 0x5e, 0x37, 0xf4, 0x91, 0xdb, 0x51, 0xe7,
+ 0x81, 0x59, 0x5c, 0x79, 0x8f, 0x06, 0x87, 0xdb, 0x1e, 0x09, 0xf9, 0xcc, 0x38, 0x27, 0x13, 0x3f,
+ 0x87, 0xd2, 0x06, 0x50, 0xd6, 0x1e, 0xfc, 0x4e, 0x01, 0x67, 0xc9, 0x23, 0xb3, 0xd5, 0xb6, 0x48,
+ 0x46, 0xae, 0x5a, 0x3c, 0x35, 0x47, 0x5e, 0x91, 0x8e, 0x9c, 0xbd, 0x39, 0xc0, 0x0e, 0x1a, 0x68,
+ 0x1d, 0xde, 0x00, 0x65, 0x9b, 0x17, 0xc5, 0x8e, 0xdb, 0xa2, 0xe6, 0x51, 0x75, 0x5a, 0x94, 0x92,
+ 0xd6, 0xeb, 0xaa, 0xe5, 0xcd, 0x84, 0x7c, 0xdc, 0x55, 0x17, 0x52, 0xc7, 0x0f, 0x8e, 0x3c, 0x82,
+ 0xd2, 0x6a, 0xda, 0x33, 0x05, 0x2c, 0x0d, 0xf1, 0x0a, 0x5e, 0x4d, 0x32, 0x2f, 0x4a, 0xa3, 0xaa,
+ 0xac, 0x14, 0x2e, 0x95, 0x8c, 0x4a, 0x3a, 0x63, 0x82, 0x81, 0xb2, 0x72, 0xf0, 0x2b, 0x05, 0x40,
+ 0xbf, 0x0f, 0x4f, 0x16, 0xca, 0xd5, 0x51, 0xf2, 0xa5, 0x0f, 0x48, 0xd2, 0xb2, 0x4c, 0x12, 0xec,
+ 0xe7, 0xa1, 0x01, 0xe6, 0x34, 0x0c, 0x4a, 0x3b, 0xd8, 0xc7, 0xf6, 0x3d, 0xea, 0x58, 0xbc, 0xef,
+ 0xb0, 0x47, 0x77, 0x89, 0x2f, 0xfa, 0x4e, 0xc9, 0xf6, 0xdd, 0xc6, 0xce, 0x1d, 0xc9, 0x41, 0x29,
+ 0x29, 0xde, 0xcd, 0x4d, 0xea, 0x58, 0xb2, 0x4b, 0xe3, 0x6e, 0xe6, 0x78, 0x48, 0x70, 0xb4, 0x1f,
+ 0x27, 0xc1, 0x8c, 0xb0, 0xc1, 0x27, 0xc7, 0xc9, 0xcd, 0x5f, 0x07, 0xa5, 0xb8, 0xa1, 0x24, 0x6a,
+ 0x45, 0x8a, 0x95, 0xe2, 0xe6, 0x43, 0x89, 0x0c, 0xfc, 0x18, 0xcc, 0xb0, 0xa8, 0xcd, 0x0a, 0x2f,
+ 0xde, 0x66, 0xb3, 0x7c, 0xd6, 0xc5, 0x0d, 0x16, 0x43, 0xc2, 0x00, 0x2c, 0x79, 0xdc, 0x7b, 0x12,
+ 0x10, 0x7f, 0xcb, 0x0d, 0x6e, 0xb9, 0x6d, 0xc7, 0xda, 0x30, 0x79, 0xf6, 0xaa, 0x45, 0xe1, 0xdd,
+ 0xb5, 0x5e, 0x57, 0x5d, 0xda, 0x19, 0x2c, 0x72, 0xdc, 0x55, 0x5f, 0x1e, 0xc2, 0x12, 0x65, 0x36,
+ 0x0c, 0x5a, 0xfb, 0x5e, 0x01, 0xb3, 0x5c, 0xe2, 0xfa, 0x21, 0x31, 0x9b, 0x7c, 0x40, 0x7f, 0xad,
+ 0x00, 0x48, 0xf2, 0x63, 0x3b, 0xac, 0xb6, 0xf2, 0xfa, 0xbb, 0x63, 0xb4, 0x57, 0xdf, 0xec, 0x4f,
+ 0x6a, 0xa6, 0x8f, 0xc5, 0xd0, 0x00, 0x9b, 0xda, 0x2f, 0x93, 0xe0, 0xfc, 0x2e, 0x6e, 0x51, 0x0b,
+ 0x07, 0xd4, 0x69, 0x6c, 0x44, 0xe6, 0xc2, 0x66, 0x81, 0x9f, 0x80, 0x19, 0x9e, 0x60, 0x0b, 0x07,
+ 0x58, 0x0e, 0xdb, 0x37, 0x46, 0xbb, 0x8e, 0x70, 0xc4, 0x6d, 0x92, 0x00, 0x27, 0x45, 0x97, 0xd0,
+ 0x50, 0x8c, 0x0a, 0x1f, 0x82, 0x22, 0xf3, 0x88, 0x29, 0x5b, 0xe5, 0xfd, 0x31, 0x62, 0x1f, 0xea,
+ 0xf5, 0x03, 0x8f, 0x98, 0x49, 0x35, 0xf2, 0x13, 0x12, 0x36, 0xa0, 0x0f, 0xa6, 0x58, 0x80, 0x83,
+ 0x36, 0x93, 0xa5, 0x75, 0xf7, 0x54, 0xac, 0x09, 0x44, 0x63, 0x5e, 0xda, 0x9b, 0x0a, 0xcf, 0x48,
+ 0x5a, 0xd2, 0xfe, 0x54, 0xc0, 0xca, 0x50, 0x5d, 0x83, 0x3a, 0x16, 0xaf, 0x87, 0xff, 0x3e, 0xcd,
+ 0x9f, 0x66, 0xd2, 0xbc, 0x7d, 0x1a, 0x81, 0x4b, 0xe7, 0x87, 0x65, 0x5b, 0xfb, 0x43, 0x01, 0x17,
+ 0x4f, 0x52, 0xbe, 0x4f, 0x59, 0x00, 0x3f, 0xea, 0x8b, 0x5e, 0x1f, 0xb1, 0xe7, 0x29, 0x0b, 0x63,
+ 0x8f, 0xd7, 0x9b, 0x88, 0x92, 0x8a, 0xdc, 0x03, 0x67, 0x68, 0x40, 0x6c, 0x3e, 0x8c, 0x79, 0x77,
+ 0xdd, 0x3b, 0xc5, 0xd0, 0x8d, 0x39, 0x69, 0xf7, 0xcc, 0x1d, 0x6e, 0x01, 0x85, 0x86, 0xb4, 0x6f,
+ 0x0a, 0x27, 0x07, 0xce, 0xf3, 0xc4, 0x47, 0xb4, 0x27, 0x88, 0x5b, 0xc9, 0x14, 0x8d, 0xaf, 0x71,
+ 0x27, 0xe6, 0xa0, 0x94, 0x14, 0x1f, 0x90, 0x9e, 0x9c, 0xbf, 0x03, 0xf6, 0x90, 0x93, 0x22, 0x8a,
+ 0x46, 0x77, 0x38, 0x20, 0xa3, 0x13, 0x8a, 0x21, 0x61, 0x1b, 0xcc, 0xdb, 0x99, 0xc5, 0x4b, 0xb6,
+ 0xca, 0x3b, 0x63, 0x18, 0xc9, 0x6e, 0x6e, 0xe1, 0xca, 0x93, 0xa5, 0xa1, 0x9c, 0x11, 0xb8, 0x07,
+ 0x2a, 0x1d, 0x99, 0x31, 0xd7, 0x09, 0xa7, 0x66, 0xb8, 0x6d, 0x94, 0x8c, 0x55, 0xbe, 0xa8, 0xed,
+ 0xe6, 0x99, 0xc7, 0x5d, 0x75, 0x31, 0x4f, 0x44, 0xfd, 0x18, 0xda, 0xef, 0x0a, 0xb8, 0x30, 0xf4,
+ 0x2e, 0xfe, 0x87, 0xea, 0xa3, 0xd9, 0xea, 0xbb, 0x71, 0x2a, 0xd5, 0x37, 0xb8, 0xec, 0x7e, 0x98,
+ 0xfa, 0x87, 0x50, 0x45, 0xbd, 0x61, 0x50, 0xf2, 0xa2, 0xfd, 0x40, 0xc6, 0x7a, 0x65, 0xdc, 0xe2,
+ 0xe1, 0xba, 0xc6, 0x1c, 0xff, 0x7e, 0xc7, 0x47, 0x94, 0xa0, 0xc2, 0xcf, 0xc1, 0xa2, 0x2d, 0x5f,
+ 0x08, 0x1c, 0x80, 0x3a, 0x41, 0xb4, 0x05, 0xfd, 0x8b, 0x0a, 0x3a, 0xdb, 0xeb, 0xaa, 0x8b, 0x9b,
+ 0x39, 0x58, 0xd4, 0x67, 0x08, 0xb6, 0x40, 0x39, 0xa9, 0x80, 0x68, 0x6d, 0x7e, 0xf3, 0x05, 0x52,
+ 0xee, 0x3a, 0xc6, 0x4b, 0x32, 0xc7, 0xe5, 0x84, 0xc6, 0x50, 0x1a, 0x1e, 0xde, 0x07, 0x73, 0x07,
+ 0x98, 0xb6, 0xda, 0x3e, 0x91, 0x0b, 0x69, 0xb8, 0x41, 0xbc, 0xca, 0x97, 0xc5, 0x5b, 0x69, 0xc6,
+ 0x71, 0x57, 0xad, 0x64, 0x08, 0x62, 0x5b, 0xc8, 0x2a, 0xc3, 0xc7, 0x0a, 0x58, 0xc4, 0xd9, 0xe7,
+ 0x23, 0xab, 0x9e, 0x11, 0x11, 0x5c, 0x1b, 0x23, 0x82, 0xdc, 0x0b, 0xd4, 0xa8, 0xca, 0x30, 0x16,
+ 0x73, 0x0c, 0x86, 0xfa, 0xac, 0xc1, 0x2f, 0xc0, 0x82, 0x9d, 0x79, 0xdd, 0xb1, 0xea, 0x94, 0x70,
+ 0x60, 0xec, 0xab, 0x8b, 0x11, 0x92, 0x97, 0x6c, 0x96, 0xce, 0x50, 0xde, 0x14, 0xb4, 0x40, 0xa9,
+ 0x83, 0x7d, 0x8a, 0xf7, 0xf9, 0x43, 0x63, 0x5a, 0xd8, 0xbd, 0x3c, 0xd6, 0xd5, 0x85, 0xba, 0xc9,
+ 0x7e, 0x19, 0x51, 0x18, 0x4a, 0x80, 0xb5, 0x9f, 0x26, 0x81, 0x7a, 0xc2, 0xa7, 0x1c, 0xde, 0x05,
+ 0xd0, 0xdd, 0x67, 0xc4, 0xef, 0x10, 0xeb, 0x76, 0xf8, 0xc6, 0x8f, 0x36, 0xe8, 0x42, 0xb2, 0x5e,
+ 0x6d, 0xf7, 0x49, 0xa0, 0x01, 0x5a, 0xd0, 0x06, 0xb3, 0x41, 0x6a, 0xf3, 0x1b, 0xe7, 0x45, 0x20,
+ 0x03, 0x4b, 0x2f, 0x8e, 0xc6, 0x62, 0xaf, 0xab, 0x66, 0x56, 0x49, 0x94, 0x81, 0x87, 0x26, 0x00,
+ 0x66, 0x72, 0x7b, 0x61, 0x03, 0xd4, 0x47, 0x1b, 0x67, 0xc9, 0x9d, 0xc5, 0x9f, 0xa0, 0xd4, 0x75,
+ 0xa5, 0x60, 0xb5, 0xbf, 0x14, 0x00, 0x92, 0xae, 0x80, 0x17, 0x41, 0xea, 0x19, 0x2f, 0xbf, 0x62,
+ 0x45, 0x0e, 0x81, 0x52, 0x74, 0xb8, 0x0a, 0xa6, 0x6d, 0xc2, 0x18, 0x6e, 0x44, 0xef, 0x80, 0xf8,
+ 0x5f, 0x86, 0xcd, 0x90, 0x8c, 0x22, 0x3e, 0xdc, 0x03, 0x53, 0x3e, 0xc1, 0xcc, 0x75, 0xe4, 0xff,
+ 0x11, 0xef, 0xf1, 0xb5, 0x0a, 0x09, 0xca, 0x71, 0x57, 0x5d, 0x1b, 0xe5, 0x5f, 0x20, 0x5d, 0x6e,
+ 0x61, 0x42, 0x09, 0x49, 0x38, 0x78, 0x1b, 0x54, 0xa4, 0x8d, 0x94, 0xc3, 0x61, 0xd7, 0x9e, 0x97,
+ 0xde, 0x54, 0x36, 0xf3, 0x02, 0xa8, 0x5f, 0x47, 0xbb, 0x0b, 0x66, 0xa2, 0xea, 0x82, 0x55, 0x50,
+ 0x4c, 0x7d, 0xbe, 0xc3, 0xc0, 0x05, 0x25, 0x97, 0x98, 0xc9, 0xc1, 0x89, 0x31, 0xb6, 0x9f, 0x3c,
+ 0xaf, 0x4d, 0x3c, 0x7d, 0x5e, 0x9b, 0x78, 0xf6, 0xbc, 0x36, 0xf1, 0xb8, 0x57, 0x53, 0x9e, 0xf4,
+ 0x6a, 0xca, 0xd3, 0x5e, 0x4d, 0x79, 0xd6, 0xab, 0x29, 0xbf, 0xf6, 0x6a, 0xca, 0xb7, 0xbf, 0xd5,
+ 0x26, 0x3e, 0x5c, 0x1d, 0xf9, 0x5f, 0xbc, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xad, 0xe2, 0x61,
+ 0x96, 0x0a, 0x14, 0x00, 0x00,
}
func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
@@ -884,6 +920,25 @@ func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.ParameterNotFoundAction != nil {
+ i -= len(*m.ParameterNotFoundAction)
+ copy(dAtA[i:], *m.ParameterNotFoundAction)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Selector != nil {
+ {
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
i -= len(m.Namespace)
copy(dAtA[i:], m.Namespace)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
@@ -1205,6 +1260,20 @@ func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if len(m.Variables) > 0 {
+ for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
if len(m.MatchConditions) > 0 {
for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -1378,6 +1447,39 @@ func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *Variable) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
@@ -1501,6 +1603,14 @@ func (m *ParamRef) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Namespace)
n += 1 + l + sovGenerated(uint64(l))
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ParameterNotFoundAction != nil {
+ l = len(*m.ParameterNotFoundAction)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -1642,6 +1752,12 @@ func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if len(m.Variables) > 0 {
+ for _, e := range m.Variables {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -1684,6 +1800,19 @@ func (m *Validation) Size() (n int) {
return n
}
+func (m *Variable) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func sovGenerated(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -1776,6 +1905,8 @@ func (this *ParamRef) String() string {
s := strings.Join([]string{`&ParamRef{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
`}`,
}, "")
return s
@@ -1882,6 +2013,11 @@ func (this *ValidatingAdmissionPolicySpec) String() string {
repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
}
repeatedStringForMatchConditions += "}"
+ repeatedStringForVariables := "[]Variable{"
+ for _, f := range this.Variables {
+ repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVariables += "}"
s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
`ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
`MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
@@ -1889,6 +2025,7 @@ func (this *ValidatingAdmissionPolicySpec) String() string {
`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
`AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
`MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `Variables:` + repeatedStringForVariables + `,`,
`}`,
}, "")
return s
@@ -1923,6 +2060,17 @@ func (this *Validation) String() string {
}, "")
return s
}
+func (this *Variable) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Variable{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -2818,6 +2966,75 @@ func (m *ParamRef) Unmarshal(dAtA []byte) error {
}
m.Namespace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ParameterNotFoundAction", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ParameterNotFoundActionType(dAtA[iNdEx:postIndex])
+ m.ParameterNotFoundAction = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -3844,6 +4061,40 @@ func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Variables = append(m.Variables, Variable{})
+ if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -4183,6 +4434,120 @@ func (m *Validation) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *Variable) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Variable: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
index c718c5464d..db02dd929f 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
@@ -227,16 +227,59 @@ message ParamKind {
optional string kind = 2;
}
-// ParamRef references a parameter resource
+// ParamRef describes how to locate the params to be used as input to
+// expressions of rules applied by a policy binding.
// +structType=atomic
message ParamRef {
- // Name of the resource being referenced.
+ // `name` is the name of the resource being referenced.
+ //
+ // `name` and `selector` are mutually exclusive properties. If one is set,
+ // the other must be unset.
+ //
+ // +optional
optional string name = 1;
- // Namespace of the referenced resource.
- // Should be empty for the cluster-scoped resources
+ // namespace is the namespace of the referenced resource. Allows limiting
+ // the search for params to a specific namespace. Applies to both `name` and
+ // `selector` fields.
+ //
+ // A per-namespace parameter may be used by specifying a namespace-scoped
+ // `paramKind` in the policy and leaving this field empty.
+ //
+ // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
+ // field results in a configuration error.
+ //
+ // - If `paramKind` is namespace-scoped, the namespace of the object being
+ // evaluated for admission will be used when this field is left unset. Take
+ // care that if this is left empty the binding must not match any cluster-scoped
+ // resources, which will result in an error.
+ //
// +optional
optional string namespace = 2;
+
+ // selector can be used to match multiple param objects based on their labels.
+ // Supply selector: {} to match all resources of the ParamKind.
+ //
+ // If multiple params are found, they are all evaluated with the policy expressions
+ // and the results are ANDed together.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ //
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
+
+ // `parameterNotFoundAction` controls the behavior of the binding when the resource
+ // exists, and name or selector is valid, but there are no parameters
+ // matched by the binding. If the value is set to `Allow`, then the absence of
+ // matched parameters will be treated as a successful validation by the binding.
+ // If set to `Deny`, then the absence of matched parameters will be subject to the
+ // `failurePolicy` of the policy.
+ //
+ // Allowed values are `Allow` or `Deny`.
+ // Defaults to `Deny`.
+ // +optional
+ optional string parameterNotFoundAction = 4;
}
// TypeChecking contains results of type checking the expressions in the
@@ -267,6 +310,15 @@ message ValidatingAdmissionPolicy {
 // ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+//
+// The CEL expressions of a policy must have a computed CEL cost below the maximum
+// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
+// Adding/removing policies, bindings, or params cannot affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
message ValidatingAdmissionPolicyBinding {
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
@@ -294,9 +346,10 @@ message ValidatingAdmissionPolicyBindingSpec {
// Required.
optional string policyName = 1;
- // ParamRef specifies the parameter resource used to configure the admission control policy.
+ // paramRef specifies the parameter resource used to configure the admission control policy.
// It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
 // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
// +optional
optional ParamRef paramRef = 2;
@@ -430,6 +483,20 @@ message ValidatingAdmissionPolicySpec {
// +listMapKey=name
// +optional
repeated MatchCondition matchConditions = 6;
+
+ // Variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except MatchConditions because MatchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, Variables must be sorted by the order of first appearance and acyclic.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ repeated Variable variables = 7;
}
// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.
@@ -460,6 +527,9 @@ message Validation {
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
@@ -525,3 +595,15 @@ message Validation {
optional string messageExpression = 4;
}
+// Variable is the definition of a variable that is used for composition.
+message Variable {
+ // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
+ // The variable can be accessed in other expressions through `variables`
+ // For example, if name is "foo", the variable will be available as `variables.foo`
+ optional string Name = 1;
+
+ // Expression is the expression that will be evaluated as the value of the variable.
+ // The CEL expression has access to the same identifiers as the CEL expressions in Validation.
+ optional string Expression = 2;
+}
+
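Note (editor, not part of the patch): a hedged sketch of how the new ParamRef and Variable fields look from Go client code, using the vendored types introduced above; the namespace, labels, and CEL expressions are illustrative only:

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	deny := admissionregistrationv1alpha1.DenyAction

	// name and selector are mutually exclusive; this ParamRef selects all
	// params in the "policies" namespace carrying the env=prod label.
	ref := admissionregistrationv1alpha1.ParamRef{
		Namespace:               "policies",
		Selector:                &metav1.LabelSelector{MatchLabels: map[string]string{"env": "prod"}},
		ParameterNotFoundAction: &deny,
	}

	// Variables are named CEL expressions; later entries may refer to earlier
	// ones through `variables.<name>`.
	vars := []admissionregistrationv1alpha1.Variable{
		{Name: "replicas", Expression: "object.spec.replicas"},
		{Name: "tooMany", Expression: "variables.replicas > params.maxReplicas"},
	}

	fmt.Printf("%+v\n%+v\n", ref, vars)
}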
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
index 2bbb55a47d..575456c838 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
@@ -39,6 +39,18 @@ const (
AllScopes ScopeType = v1.AllScopes
)
+// ParameterNotFoundActionType specifies a failure policy that defines how a binding
+// is evaluated when the param referred to by its paramRef is not found.
+// +enum
+type ParameterNotFoundActionType string
+
+const (
+ // AllowAction means that the binding treats the absence of matched params as a successful validation.
+ AllowAction ParameterNotFoundActionType = "Allow"
+ // DenyAction means that the absence of matched params is subject to the failurePolicy of the policy.
+ DenyAction ParameterNotFoundActionType = "Deny"
+)
+
// FailurePolicyType specifies a failure policy that defines how unrecognized errors from the admission endpoint are handled.
// +enum
type FailurePolicyType string
@@ -201,6 +213,20 @@ type ValidatingAdmissionPolicySpec struct {
// +listMapKey=name
// +optional
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`
+
+ // Variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except MatchConditions because MatchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, Variables must be sorted by the order of first appearance and acyclic.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"`
}
type MatchCondition v1.MatchCondition
@@ -228,6 +254,9 @@ type Validation struct {
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
@@ -290,6 +319,18 @@ type Validation struct {
MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"`
}
+// Variable is the definition of a variable that is used for composition.
+type Variable struct {
+ // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
+ // The variable can be accessed in other expressions through `variables`
+ // For example, if name is "foo", the variable will be available as `variables.foo`
+ Name string `json:"name" protobuf:"bytes,1,opt,name=Name"`
+
+ // Expression is the expression that will be evaluated as the value of the variable.
+ // The CEL expression has access to the same identifiers as the CEL expressions in Validation.
+ Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"`
+}
+
// AuditAnnotation describes how to produce an audit annotation for an API request.
type AuditAnnotation struct {
// key specifies the audit annotation key. The audit annotation keys of
@@ -334,6 +375,15 @@ type AuditAnnotation struct {
 // ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+//
+// The CEL expressions of a policy must have a computed CEL cost below the maximum
+// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
+// Adding/removing policies, bindings, or params cannot affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
type ValidatingAdmissionPolicyBinding struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
@@ -364,9 +414,10 @@ type ValidatingAdmissionPolicyBindingSpec struct {
// Required.
PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
- // ParamRef specifies the parameter resource used to configure the admission control policy.
+ // paramRef specifies the parameter resource used to configure the admission control policy.
// It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
 	// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
// +optional
ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
@@ -421,15 +472,59 @@ type ValidatingAdmissionPolicyBindingSpec struct {
ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"`
}
-// ParamRef references a parameter resource
+// ParamRef describes how to locate the params to be used as input to
+// expressions of rules applied by a policy binding.
// +structType=atomic
type ParamRef struct {
- // Name of the resource being referenced.
+ // `name` is the name of the resource being referenced.
+ //
+ // `name` and `selector` are mutually exclusive properties. If one is set,
+ // the other must be unset.
+ //
+ // +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"`
- // Namespace of the referenced resource.
- // Should be empty for the cluster-scoped resources
+
+ // namespace is the namespace of the referenced resource. Allows limiting
+ // the search for params to a specific namespace. Applies to both `name` and
+ // `selector` fields.
+ //
+ // A per-namespace parameter may be used by specifying a namespace-scoped
+ // `paramKind` in the policy and leaving this field empty.
+ //
+ // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
+ // field results in a configuration error.
+ //
+ // - If `paramKind` is namespace-scoped, the namespace of the object being
+ // evaluated for admission will be used when this field is left unset. Take
+ // care that if this is left empty the binding must not match any cluster-scoped
+ // resources, which will result in an error.
+ //
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"`
+
+ // selector can be used to match multiple param objects based on their labels.
+ // Supply selector: {} to match all resources of the ParamKind.
+ //
+ // If multiple params are found, they are all evaluated with the policy expressions
+ // and the results are ANDed together.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ //
+ // +optional
+ Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"`
+
+ // `parameterNotFoundAction` controls the behavior of the binding when the resource
+ // exists, and name or selector is valid, but there are no parameters
+	// matched by the binding. If the value is set to `Allow`, the absence of
+	// matched parameters is treated as a successful validation by the binding.
+	// If set to `Deny`, the absence of matched parameters is subject to the
+	// `failurePolicy` of the policy.
+	//
+	// Allowed values are `Allow` or `Deny`.
+	// Defaults to `Deny`.
+ // +optional
+ ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"`
}
// MatchResources decides whether to run the admission control policy on an object based
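For illustration only and not part of the patch: a sketch of the extended ParamRef inside a binding spec, using the vendored v1alpha1 types; the policy name, namespace, and labels are hypothetical. With a selector the bound policy is evaluated once per matched param and the results are ANDed, while setting `parameterNotFoundAction` to `Allow` treats an empty match set as passing instead of deferring to the policy's failurePolicy.

package example

import (
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// allowWhenUnmatched uses a typed string literal; the package may also declare
// named constants for these values, but the literal keeps the sketch self-contained.
var allowWhenUnmatched = admissionregistrationv1alpha1.ParameterNotFoundActionType("Allow")

// bindingSpec selects every param of the policy's ParamKind labeled
// environment=prod in the "policies" namespace; each matched param yields one
// evaluation of the bound policy.
var bindingSpec = admissionregistrationv1alpha1.ValidatingAdmissionPolicyBindingSpec{
	PolicyName: "demo-policy.example.com",
	ParamRef: &admissionregistrationv1alpha1.ParamRef{
		Namespace: "policies",
		Selector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"environment": "prod"},
		},
		ParameterNotFoundAction: &allowWhenUnmatched,
	},
	ValidationActions: []admissionregistrationv1alpha1.ValidationAction{
		admissionregistrationv1alpha1.ValidationAction("Deny"),
	},
}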
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
index b3cac1821b..dcf46b324f 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
@@ -80,9 +80,11 @@ func (ParamKind) SwaggerDoc() map[string]string {
}
var map_ParamRef = map[string]string{
- "": "ParamRef references a parameter resource",
- "name": "Name of the resource being referenced.",
- "namespace": "Namespace of the referenced resource. Should be empty for the cluster-scoped resources",
+ "": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.",
+ "name": "`name` is the name of the resource being referenced.\n\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.",
+ "namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.",
+ "selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.",
+	"parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, the absence of matched parameters is treated as a successful validation by the binding. If set to `Deny`, the absence of matched parameters is subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`. Defaults to `Deny`.",
}
func (ParamRef) SwaggerDoc() map[string]string {
@@ -110,7 +112,7 @@ func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string {
}
var map_ValidatingAdmissionPolicyBinding = map[string]string{
- "": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.",
+	"": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params cannot affect whether a given (policy, binding, param) combination is within its own CEL budget.",
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
"spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.",
}
@@ -132,7 +134,7 @@ func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{
"": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.",
"policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
- "paramRef": "ParamRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.",
+	"paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
"matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.",
"validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.",
}
@@ -159,6 +161,7 @@ var map_ValidatingAdmissionPolicySpec = map[string]string{
"failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
"auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped",
+ "variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.",
}
func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
@@ -178,7 +181,7 @@ func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string {
var map_Validation = map[string]string{
"": "Validation specifies the CEL expression which is used to apply the validation.",
- "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.",
+ "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.",
"message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".",
"reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.",
"messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"",
@@ -188,4 +191,14 @@ func (Validation) SwaggerDoc() map[string]string {
return map_Validation
}
+var map_Variable = map[string]string{
+ "": "Variable is the definition of a variable that is used for composition.",
+	"name": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables`. For example, if name is \"foo\", the variable will be available as `variables.foo`.",
+ "expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.",
+}
+
+func (Variable) SwaggerDoc() map[string]string {
+ return map_Variable
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
index 8e4abfd087..24cd0e4e9b 100644
--- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
@@ -160,6 +160,16 @@ func (in *ParamKind) DeepCopy() *ParamKind {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParamRef) DeepCopyInto(out *ParamRef) {
*out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ParameterNotFoundAction != nil {
+ in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction
+ *out = new(ParameterNotFoundActionType)
+ **out = **in
+ }
return
}
@@ -288,7 +298,7 @@ func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmi
if in.ParamRef != nil {
in, out := &in.ParamRef, &out.ParamRef
*out = new(ParamRef)
- **out = **in
+ (*in).DeepCopyInto(*out)
}
if in.MatchResources != nil {
in, out := &in.MatchResources, &out.MatchResources
@@ -381,6 +391,11 @@ func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPo
*out = make([]MatchCondition, len(*in))
copy(*out, *in)
}
+ if in.Variables != nil {
+ in, out := &in.Variables, &out.Variables
+ *out = make([]Variable, len(*in))
+ copy(*out, *in)
+ }
return
}
@@ -442,3 +457,19 @@ func (in *Validation) DeepCopy() *Validation {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Variable) DeepCopyInto(out *Variable) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable.
+func (in *Variable) DeepCopy() *Variable {
+ if in == nil {
+ return nil
+ }
+ out := new(Variable)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
index 8fb354c319..267ddc1cbd 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
@@ -25,8 +25,9 @@ import (
io "io"
proto "github.com/gogo/protobuf/proto"
- v1 "k8s.io/api/admissionregistration/v1"
- v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ v11 "k8s.io/api/admissionregistration/v1"
+ k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
math "math"
math_bits "math/bits"
@@ -45,10 +46,66 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
+func (*AuditAnnotation) ProtoMessage() {}
+func (*AuditAnnotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{0}
+}
+func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *AuditAnnotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuditAnnotation.Merge(m, src)
+}
+func (m *AuditAnnotation) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuditAnnotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuditAnnotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
+
+func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
+func (*ExpressionWarning) ProtoMessage() {}
+func (*ExpressionWarning) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{1}
+}
+func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExpressionWarning) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExpressionWarning.Merge(m, src)
+}
+func (m *ExpressionWarning) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExpressionWarning) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExpressionWarning.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
+
func (m *MatchCondition) Reset() { *m = MatchCondition{} }
func (*MatchCondition) ProtoMessage() {}
func (*MatchCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{0}
+ return fileDescriptor_abeea74cbc46f55a, []int{2}
}
func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -73,10 +130,38 @@ func (m *MatchCondition) XXX_DiscardUnknown() {
var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
+func (m *MatchResources) Reset() { *m = MatchResources{} }
+func (*MatchResources) ProtoMessage() {}
+func (*MatchResources) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{3}
+}
+func (m *MatchResources) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *MatchResources) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MatchResources.Merge(m, src)
+}
+func (m *MatchResources) XXX_Size() int {
+ return m.Size()
+}
+func (m *MatchResources) XXX_DiscardUnknown() {
+ xxx_messageInfo_MatchResources.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MatchResources proto.InternalMessageInfo
+
func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} }
func (*MutatingWebhook) ProtoMessage() {}
func (*MutatingWebhook) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{1}
+ return fileDescriptor_abeea74cbc46f55a, []int{4}
}
func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -104,7 +189,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo
func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} }
func (*MutatingWebhookConfiguration) ProtoMessage() {}
func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{2}
+ return fileDescriptor_abeea74cbc46f55a, []int{5}
}
func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -132,7 +217,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo
func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} }
func (*MutatingWebhookConfigurationList) ProtoMessage() {}
func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{3}
+ return fileDescriptor_abeea74cbc46f55a, []int{6}
}
func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -157,10 +242,94 @@ func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() {
var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo
+func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
+func (*NamedRuleWithOperations) ProtoMessage() {}
+func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{7}
+}
+func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NamedRuleWithOperations.Merge(m, src)
+}
+func (m *NamedRuleWithOperations) XXX_Size() int {
+ return m.Size()
+}
+func (m *NamedRuleWithOperations) XXX_DiscardUnknown() {
+ xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
+
+func (m *ParamKind) Reset() { *m = ParamKind{} }
+func (*ParamKind) ProtoMessage() {}
+func (*ParamKind) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{8}
+}
+func (m *ParamKind) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ParamKind) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ParamKind.Merge(m, src)
+}
+func (m *ParamKind) XXX_Size() int {
+ return m.Size()
+}
+func (m *ParamKind) XXX_DiscardUnknown() {
+ xxx_messageInfo_ParamKind.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ParamKind proto.InternalMessageInfo
+
+func (m *ParamRef) Reset() { *m = ParamRef{} }
+func (*ParamRef) ProtoMessage() {}
+func (*ParamRef) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{9}
+}
+func (m *ParamRef) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ParamRef) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ParamRef.Merge(m, src)
+}
+func (m *ParamRef) XXX_Size() int {
+ return m.Size()
+}
+func (m *ParamRef) XXX_DiscardUnknown() {
+ xxx_messageInfo_ParamRef.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ParamRef proto.InternalMessageInfo
+
func (m *ServiceReference) Reset() { *m = ServiceReference{} }
func (*ServiceReference) ProtoMessage() {}
func (*ServiceReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{4}
+ return fileDescriptor_abeea74cbc46f55a, []int{10}
}
func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -185,10 +354,234 @@ func (m *ServiceReference) XXX_DiscardUnknown() {
var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
+func (m *TypeChecking) Reset() { *m = TypeChecking{} }
+func (*TypeChecking) ProtoMessage() {}
+func (*TypeChecking) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{11}
+}
+func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *TypeChecking) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TypeChecking.Merge(m, src)
+}
+func (m *TypeChecking) XXX_Size() int {
+ return m.Size()
+}
+func (m *TypeChecking) XXX_DiscardUnknown() {
+ xxx_messageInfo_TypeChecking.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
+
+func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
+func (*ValidatingAdmissionPolicy) ProtoMessage() {}
+func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{12}
+}
+func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src)
+}
+func (m *ValidatingAdmissionPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
+
+func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
+func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
+func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{13}
+}
+func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src)
+}
+func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int {
+ return m.Size()
+}
+func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
+ xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
+
+func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
+func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
+func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{14}
+}
+func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src)
+}
+func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo
+
+func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
+func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{15}
+}
+func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src)
+}
+func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
+
+func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
+func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
+func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{16}
+}
+func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src)
+}
+func (m *ValidatingAdmissionPolicyList) XXX_Size() int {
+ return m.Size()
+}
+func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() {
+ xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
+
+func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
+func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
+func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{17}
+}
+func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src)
+}
+func (m *ValidatingAdmissionPolicySpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
+
+func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
+func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
+func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{18}
+}
+func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src)
+}
+func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
+
func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} }
func (*ValidatingWebhook) ProtoMessage() {}
func (*ValidatingWebhook) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{5}
+ return fileDescriptor_abeea74cbc46f55a, []int{19}
}
func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -216,7 +609,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo
func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} }
func (*ValidatingWebhookConfiguration) ProtoMessage() {}
func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{6}
+ return fileDescriptor_abeea74cbc46f55a, []int{20}
}
func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -244,7 +637,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo
func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} }
func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{7}
+ return fileDescriptor_abeea74cbc46f55a, []int{21}
}
func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -269,15 +662,15 @@ func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() {
var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo
-func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} }
-func (*WebhookClientConfig) ProtoMessage() {}
-func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_abeea74cbc46f55a, []int{8}
+func (m *Validation) Reset() { *m = Validation{} }
+func (*Validation) ProtoMessage() {}
+func (*Validation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{22}
}
-func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
+func (m *Validation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
@@ -285,27 +678,99 @@ func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte,
}
return b[:n], nil
}
-func (m *WebhookClientConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WebhookClientConfig.Merge(m, src)
+func (m *Validation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Validation.Merge(m, src)
}
-func (m *WebhookClientConfig) XXX_Size() int {
+func (m *Validation) XXX_Size() int {
return m.Size()
}
-func (m *WebhookClientConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m)
+func (m *Validation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Validation.DiscardUnknown(m)
}
-var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo
+var xxx_messageInfo_Validation proto.InternalMessageInfo
-func init() {
- proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition")
- proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook")
+func (m *Variable) Reset() { *m = Variable{} }
+func (*Variable) ProtoMessage() {}
+func (*Variable) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{23}
+}
+func (m *Variable) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Variable) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Variable.Merge(m, src)
+}
+func (m *Variable) XXX_Size() int {
+ return m.Size()
+}
+func (m *Variable) XXX_DiscardUnknown() {
+ xxx_messageInfo_Variable.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Variable proto.InternalMessageInfo
+
+func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} }
+func (*WebhookClientConfig) ProtoMessage() {}
+func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_abeea74cbc46f55a, []int{24}
+}
+func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *WebhookClientConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WebhookClientConfig.Merge(m, src)
+}
+func (m *WebhookClientConfig) XXX_Size() int {
+ return m.Size()
+}
+func (m *WebhookClientConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation")
+ proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning")
+ proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition")
+ proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources")
+ proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook")
proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration")
proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList")
+ proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations")
+ proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind")
+ proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef")
proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference")
+ proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1beta1.TypeChecking")
+ proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy")
+ proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding")
+ proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingList")
+ proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec")
+ proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyList")
+ proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec")
+ proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus")
proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook")
proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration")
proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList")
+ proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1beta1.Validation")
+ proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1beta1.Variable")
proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig")
}
@@ -314,73 +779,197 @@ func init() {
}
var fileDescriptor_abeea74cbc46f55a = []byte{
- // 1041 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x57, 0x4f, 0x73, 0xdb, 0xc4,
- 0x1b, 0x8e, 0xe2, 0xf8, 0x17, 0x67, 0xed, 0x24, 0xcd, 0xfe, 0x80, 0x88, 0xd0, 0xb1, 0x3c, 0x3e,
- 0x30, 0xbe, 0x20, 0xb5, 0x29, 0x03, 0xa5, 0x0c, 0x87, 0x2a, 0xb4, 0x03, 0x33, 0x49, 0x5a, 0x36,
- 0xfd, 0x33, 0x03, 0x65, 0xa6, 0x6b, 0xf9, 0xb5, 0xbd, 0x58, 0xd2, 0x7a, 0xb4, 0xab, 0xb4, 0x19,
- 0x2e, 0x7c, 0x04, 0xbe, 0x02, 0x1f, 0x84, 0x03, 0xb7, 0x1c, 0x7b, 0xec, 0x05, 0x0d, 0x11, 0x67,
- 0x0e, 0x5c, 0x73, 0x62, 0xb4, 0x52, 0x6c, 0xcb, 0x76, 0x5a, 0x11, 0x66, 0x72, 0xca, 0xcd, 0xfb,
- 0xbc, 0xfb, 0xbe, 0xcf, 0x3e, 0xab, 0x77, 0xdf, 0x67, 0x8c, 0xbe, 0x19, 0xdc, 0x16, 0x26, 0xe3,
- 0xd6, 0x20, 0x6c, 0x43, 0xe0, 0x83, 0x04, 0x61, 0x1d, 0x82, 0xdf, 0xe1, 0x81, 0x95, 0x05, 0xe8,
- 0x90, 0x59, 0xb4, 0xe3, 0x31, 0x21, 0x18, 0xf7, 0x03, 0xe8, 0x31, 0x21, 0x03, 0x2a, 0x19, 0xf7,
- 0xad, 0xc3, 0x9b, 0x6d, 0x90, 0xf4, 0xa6, 0xd5, 0x03, 0x1f, 0x02, 0x2a, 0xa1, 0x63, 0x0e, 0x03,
- 0x2e, 0x39, 0x6e, 0xa5, 0x99, 0x26, 0x1d, 0x32, 0x73, 0x6e, 0xa6, 0x99, 0x65, 0x6e, 0x7d, 0xd4,
- 0x63, 0xb2, 0x1f, 0xb6, 0x4d, 0x87, 0x7b, 0x56, 0x8f, 0xf7, 0xb8, 0xa5, 0x0a, 0xb4, 0xc3, 0xae,
- 0x5a, 0xa9, 0x85, 0xfa, 0x95, 0x16, 0xde, 0xba, 0x55, 0xe0, 0x48, 0xd3, 0xa7, 0xd9, 0xfa, 0x78,
- 0x9c, 0xe4, 0x51, 0xa7, 0xcf, 0x7c, 0x08, 0x8e, 0xac, 0xe1, 0xa0, 0x97, 0x00, 0xc2, 0xf2, 0x40,
- 0xd2, 0x79, 0x59, 0xd6, 0x79, 0x59, 0x41, 0xe8, 0x4b, 0xe6, 0xc1, 0x4c, 0xc2, 0x27, 0x6f, 0x4b,
- 0x10, 0x4e, 0x1f, 0x3c, 0x3a, 0x9d, 0xd7, 0xec, 0xa2, 0xb5, 0x3d, 0x2a, 0x9d, 0xfe, 0x0e, 0xf7,
- 0x3b, 0x2c, 0xd1, 0x80, 0x1b, 0x68, 0xc9, 0xa7, 0x1e, 0xe8, 0x5a, 0x43, 0x6b, 0xad, 0xd8, 0xb5,
- 0xe3, 0xc8, 0x58, 0x88, 0x23, 0x63, 0x69, 0x9f, 0x7a, 0x40, 0x54, 0x04, 0x6f, 0x23, 0x04, 0x2f,
- 0x87, 0x01, 0x28, 0xfd, 0xfa, 0xa2, 0xda, 0x87, 0xb3, 0x7d, 0xe8, 0xde, 0x28, 0x42, 0x26, 0x76,
- 0x35, 0x7f, 0xab, 0xa0, 0xf5, 0xbd, 0x50, 0x52, 0xc9, 0xfc, 0xde, 0x53, 0x68, 0xf7, 0x39, 0x1f,
- 0x14, 0x60, 0x7a, 0x81, 0x6a, 0x8e, 0xcb, 0xc0, 0x97, 0x3b, 0xdc, 0xef, 0xb2, 0x9e, 0xe2, 0xaa,
- 0x6e, 0x7f, 0x61, 0x16, 0xfd, 0xc2, 0x66, 0x46, 0xb5, 0x33, 0x51, 0xc4, 0x7e, 0x27, 0x23, 0xaa,
- 0x4d, 0xa2, 0x24, 0x47, 0x84, 0x9f, 0xa1, 0x72, 0x10, 0xba, 0x20, 0xf4, 0x52, 0xa3, 0xd4, 0xaa,
- 0x6e, 0x7f, 0x5a, 0x84, 0xd1, 0x24, 0xa1, 0x0b, 0x4f, 0x99, 0xec, 0x3f, 0x18, 0x42, 0x0a, 0x0a,
- 0x7b, 0x35, 0xe3, 0x2a, 0x27, 0x31, 0x41, 0xd2, 0xa2, 0x78, 0x17, 0xad, 0x76, 0x29, 0x73, 0xc3,
- 0x00, 0x1e, 0x72, 0x97, 0x39, 0x47, 0xfa, 0x92, 0xba, 0x81, 0x0f, 0xe3, 0xc8, 0x58, 0xbd, 0x3f,
- 0x19, 0x38, 0x8d, 0x8c, 0x8d, 0x1c, 0xf0, 0xe8, 0x68, 0x08, 0x24, 0x9f, 0x8c, 0xbf, 0x44, 0x55,
- 0x2f, 0xf9, 0x84, 0x59, 0xad, 0x15, 0x55, 0xab, 0x19, 0x47, 0x46, 0x75, 0x6f, 0x0c, 0x9f, 0x46,
- 0xc6, 0xfa, 0xc4, 0x52, 0xd5, 0x99, 0x4c, 0xc3, 0x2f, 0xd1, 0x46, 0x72, 0xe5, 0x62, 0x48, 0x1d,
- 0x38, 0x00, 0x17, 0x1c, 0xc9, 0x03, 0xbd, 0xac, 0xee, 0xfb, 0xd6, 0x84, 0xfa, 0x51, 0x73, 0x99,
- 0xc3, 0x41, 0x2f, 0x01, 0x84, 0x99, 0xf4, 0x70, 0x22, 0x7f, 0x97, 0xb6, 0xc1, 0x3d, 0x4b, 0xb5,
- 0xdf, 0x8d, 0x23, 0x63, 0x63, 0x7f, 0xba, 0x22, 0x99, 0x25, 0xc1, 0x1c, 0xad, 0xf1, 0xf6, 0x0f,
- 0xe0, 0xc8, 0x11, 0x6d, 0xf5, 0xe2, 0xb4, 0x38, 0x8e, 0x8c, 0xb5, 0x07, 0xb9, 0x72, 0x64, 0xaa,
- 0x7c, 0x72, 0x61, 0x82, 0x75, 0xe0, 0x5e, 0xb7, 0x0b, 0x8e, 0x14, 0xfa, 0xff, 0xc6, 0x17, 0x76,
- 0x30, 0x86, 0x93, 0x0b, 0x1b, 0x2f, 0x77, 0x5c, 0x2a, 0x04, 0x99, 0x4c, 0xc3, 0x77, 0xd0, 0x5a,
- 0xf2, 0xb0, 0x78, 0x28, 0x0f, 0xc0, 0xe1, 0x7e, 0x47, 0xe8, 0xcb, 0x0d, 0xad, 0x55, 0x4e, 0x4f,
- 0xf0, 0x28, 0x17, 0x21, 0x53, 0x3b, 0xf1, 0x63, 0xb4, 0x39, 0xea, 0x22, 0x02, 0x87, 0x0c, 0x5e,
- 0x3c, 0x81, 0x20, 0x59, 0x08, 0xbd, 0xd2, 0x28, 0xb5, 0x56, 0xec, 0x0f, 0xe2, 0xc8, 0xd8, 0xbc,
- 0x3b, 0x7f, 0x0b, 0x39, 0x2f, 0x17, 0x3f, 0x47, 0x38, 0x00, 0xe6, 0x1f, 0x72, 0x47, 0xb5, 0x5f,
- 0xd6, 0x10, 0x48, 0xe9, 0xbb, 0x11, 0x47, 0x06, 0x26, 0x33, 0xd1, 0xd3, 0xc8, 0x78, 0x6f, 0x16,
- 0x55, 0xed, 0x31, 0xa7, 0x16, 0xfe, 0x11, 0xad, 0x7b, 0xb9, 0x71, 0x21, 0xf4, 0x9a, 0x7a, 0x21,
- 0xb7, 0x8b, 0xbf, 0xc9, 0xfc, 0xbc, 0xb1, 0x37, 0xb3, 0x27, 0xb2, 0x9e, 0xc7, 0x05, 0x99, 0x66,
- 0x6a, 0xfe, 0xae, 0xa1, 0xeb, 0x53, 0x33, 0x24, 0x7d, 0xae, 0x61, 0xca, 0x80, 0x9f, 0xa3, 0x4a,
- 0xd2, 0x15, 0x1d, 0x2a, 0xa9, 0x1a, 0x2a, 0xd5, 0xed, 0x1b, 0xc5, 0x7a, 0x28, 0x6d, 0x98, 0x3d,
- 0x90, 0x74, 0x3c, 0xc8, 0xc6, 0x18, 0x19, 0x55, 0xc5, 0xdf, 0xa1, 0x4a, 0xc6, 0x2c, 0xf4, 0x45,
- 0x25, 0xfc, 0xb3, 0x7f, 0x21, 0x3c, 0x7f, 0x76, 0x7b, 0x29, 0xa1, 0x22, 0xa3, 0x82, 0xcd, 0xbf,
- 0x34, 0xd4, 0x78, 0x93, 0xbe, 0x5d, 0x26, 0x24, 0x7e, 0x36, 0xa3, 0xd1, 0x2c, 0xf8, 0x4e, 0x98,
- 0x48, 0x15, 0x5e, 0xcb, 0x14, 0x56, 0xce, 0x90, 0x09, 0x7d, 0x03, 0x54, 0x66, 0x12, 0xbc, 0x33,
- 0x71, 0xf7, 0x2f, 0x2c, 0x2e, 0x77, 0xf0, 0xf1, 0x18, 0xfc, 0x3a, 0x29, 0x4e, 0x52, 0x8e, 0xe6,
- 0x2f, 0x1a, 0xba, 0x76, 0x00, 0xc1, 0x21, 0x73, 0x80, 0x40, 0x17, 0x02, 0xf0, 0x1d, 0xc0, 0x16,
- 0x5a, 0x19, 0x8d, 0x88, 0xcc, 0x19, 0x36, 0xb2, 0xec, 0x95, 0xd1, 0x38, 0x21, 0xe3, 0x3d, 0x23,
- 0x17, 0x59, 0x3c, 0xd7, 0x45, 0xae, 0xa3, 0xa5, 0x21, 0x95, 0x7d, 0xbd, 0xa4, 0x76, 0x54, 0x92,
- 0xe8, 0x43, 0x2a, 0xfb, 0x44, 0xa1, 0x2a, 0xca, 0x03, 0xa9, 0x66, 0x70, 0x39, 0x8b, 0xf2, 0x40,
- 0x12, 0x85, 0x36, 0x4f, 0x96, 0xd1, 0xc6, 0x13, 0xea, 0xb2, 0xce, 0x95, 0x73, 0x5d, 0x39, 0xd7,
- 0xdb, 0x9d, 0x0b, 0x5d, 0x39, 0xd7, 0x85, 0x9c, 0x6b, 0x8e, 0xaf, 0x54, 0x2f, 0xcd, 0x57, 0x4e,
- 0x34, 0x54, 0x9f, 0x79, 0xe3, 0x97, 0xed, 0x2c, 0xdf, 0xcf, 0x38, 0xcb, 0xe7, 0xc5, 0xa5, 0xcf,
- 0x9c, 0x7e, 0xc6, 0x5b, 0xfe, 0xd6, 0x50, 0xf3, 0xcd, 0x1a, 0x2f, 0xc1, 0x5d, 0xbc, 0xbc, 0xbb,
- 0x7c, 0xf5, 0x1f, 0x04, 0x16, 0xf1, 0x97, 0x5f, 0x35, 0xf4, 0xff, 0x39, 0x63, 0x14, 0xbf, 0x8f,
- 0x4a, 0x61, 0xe0, 0x66, 0x76, 0xb0, 0x1c, 0x47, 0x46, 0xe9, 0x31, 0xd9, 0x25, 0x09, 0x86, 0x29,
- 0x5a, 0x16, 0xa9, 0x23, 0x65, 0xf2, 0xef, 0x14, 0x3f, 0xe3, 0xb4, 0x95, 0xd9, 0xd5, 0x38, 0x32,
- 0x96, 0xcf, 0xd0, 0xb3, 0xba, 0xb8, 0x85, 0x2a, 0x0e, 0xb5, 0x43, 0xbf, 0xe3, 0xa6, 0x9e, 0x55,
- 0xb3, 0x6b, 0xc9, 0x75, 0xed, 0xdc, 0x4d, 0x31, 0x32, 0x8a, 0xda, 0xfb, 0xc7, 0x27, 0xf5, 0x85,
- 0x57, 0x27, 0xf5, 0x85, 0xd7, 0x27, 0xf5, 0x85, 0x9f, 0xe2, 0xba, 0x76, 0x1c, 0xd7, 0xb5, 0x57,
- 0x71, 0x5d, 0x7b, 0x1d, 0xd7, 0xb5, 0x3f, 0xe2, 0xba, 0xf6, 0xf3, 0x9f, 0xf5, 0x85, 0x6f, 0x5b,
- 0x45, 0xff, 0x28, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x1f, 0xf5, 0x97, 0x1c, 0x6c, 0x0f, 0x00,
- 0x00,
+ // 1973 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x23, 0x49,
+ 0x35, 0x1d, 0xe7, 0xc3, 0x7e, 0xce, 0x97, 0x6b, 0x67, 0x89, 0x77, 0x76, 0xd6, 0x8e, 0x5a, 0x2b,
+ 0x94, 0x91, 0xc0, 0xde, 0xc9, 0xae, 0x76, 0x97, 0x59, 0x21, 0x14, 0x67, 0x67, 0x86, 0x99, 0x9d,
+ 0x64, 0x42, 0x65, 0x37, 0x91, 0x60, 0x57, 0x9a, 0x72, 0x77, 0xd9, 0x6e, 0x6c, 0x77, 0x37, 0x5d,
+ 0x6d, 0xcf, 0x04, 0x24, 0x40, 0xe2, 0xb0, 0x57, 0x24, 0x2e, 0x48, 0x9c, 0xf8, 0x0b, 0xdc, 0x91,
+ 0xe0, 0x36, 0xc7, 0xbd, 0x31, 0x12, 0xc2, 0x22, 0xe6, 0xc0, 0x89, 0x03, 0x07, 0x38, 0xe4, 0x02,
+ 0xaa, 0xea, 0xea, 0x4f, 0xb7, 0x27, 0x9d, 0x90, 0x09, 0x97, 0xb9, 0xa5, 0xdf, 0x67, 0xbd, 0x57,
+ 0xef, 0xab, 0x9e, 0x03, 0xdf, 0xeb, 0x7e, 0xc8, 0x6a, 0x86, 0x55, 0xef, 0x0e, 0x9a, 0xd4, 0x31,
+ 0xa9, 0x4b, 0x59, 0x7d, 0x48, 0x4d, 0xdd, 0x72, 0xea, 0x12, 0x41, 0x6c, 0xa3, 0x4e, 0xf4, 0xbe,
+ 0xc1, 0x98, 0x61, 0x99, 0x0e, 0x6d, 0x1b, 0xcc, 0x75, 0x88, 0x6b, 0x58, 0x66, 0x7d, 0x78, 0xab,
+ 0x49, 0x5d, 0x72, 0xab, 0xde, 0xa6, 0x26, 0x75, 0x88, 0x4b, 0xf5, 0x9a, 0xed, 0x58, 0xae, 0x85,
+ 0x36, 0x3d, 0xce, 0x1a, 0xb1, 0x8d, 0x5a, 0x2a, 0x67, 0x4d, 0x72, 0x5e, 0xff, 0x66, 0xdb, 0x70,
+ 0x3b, 0x83, 0x66, 0x4d, 0xb3, 0xfa, 0xf5, 0xb6, 0xd5, 0xb6, 0xea, 0x42, 0x40, 0x73, 0xd0, 0x12,
+ 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x6e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd, 0xbd,
+ 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xb8, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x7d,
+ 0xea, 0x92, 0x34, 0xae, 0xfa, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0xbc, 0x7f,
+ 0x16, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xd5, 0xed, 0x81, 0x6e, 0xb8, 0xdb,
+ 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x16, 0xe4, 0xba, 0xf4, 0xb8, 0xac, 0x6c, 0x28, 0x9b, 0x85,
+ 0x46, 0xf1, 0xd9, 0xa8, 0x3a, 0x33, 0x1e, 0x55, 0x73, 0x9f, 0xd0, 0x63, 0xcc, 0xe1, 0x68, 0x1b,
+ 0x56, 0x87, 0xa4, 0x37, 0xa0, 0x77, 0x9e, 0xda, 0x0e, 0x15, 0x2e, 0x28, 0xcf, 0x0a, 0xd2, 0x75,
+ 0x49, 0xba, 0x7a, 0x18, 0x47, 0xe3, 0x24, 0xbd, 0xda, 0x83, 0x52, 0xf8, 0x75, 0x44, 0x1c, 0xd3,
+ 0x30, 0xdb, 0xe8, 0x1b, 0x90, 0x6f, 0x19, 0xb4, 0xa7, 0x63, 0xda, 0x92, 0x02, 0xd7, 0xa4, 0xc0,
+ 0xfc, 0x5d, 0x09, 0xc7, 0x01, 0x05, 0xba, 0x09, 0x8b, 0x4f, 0x3c, 0xc6, 0x72, 0x4e, 0x10, 0xaf,
+ 0x4a, 0xe2, 0x45, 0x29, 0x0f, 0xfb, 0x78, 0xb5, 0x05, 0x2b, 0xbb, 0xc4, 0xd5, 0x3a, 0x3b, 0x96,
+ 0xa9, 0x1b, 0xc2, 0xc2, 0x0d, 0x98, 0x33, 0x49, 0x9f, 0x4a, 0x13, 0x97, 0x24, 0xe7, 0xdc, 0x1e,
+ 0xe9, 0x53, 0x2c, 0x30, 0x68, 0x0b, 0x80, 0x26, 0xed, 0x43, 0x92, 0x0e, 0x22, 0xa6, 0x45, 0xa8,
+ 0xd4, 0x3f, 0xcd, 0x49, 0x45, 0x98, 0x32, 0x6b, 0xe0, 0x68, 0x94, 0xa1, 0xa7, 0x50, 0xe2, 0xe2,
+ 0x98, 0x4d, 0x34, 0x7a, 0x40, 0x7b, 0x54, 0x73, 0x2d, 0x47, 0x68, 0x2d, 0x6e, 0xbd, 0x5b, 0x0b,
+ 0xc3, 0x34, 0xb8, 0xb1, 0x9a, 0xdd, 0x6d, 0x73, 0x00, 0xab, 0xf1, 0xc0, 0xa8, 0x0d, 0x6f, 0xd5,
+ 0x1e, 0x92, 0x26, 0xed, 0xf9, 0xac, 0x8d, 0xd7, 0xc7, 0xa3, 0x6a, 0x69, 0x2f, 0x29, 0x11, 0x4f,
+ 0x2a, 0x41, 0x16, 0xac, 0x58, 0xcd, 0x1f, 0x52, 0xcd, 0x0d, 0xd4, 0xce, 0x5e, 0x5c, 0x2d, 0x1a,
+ 0x8f, 0xaa, 0x2b, 0x8f, 0x62, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x29, 0x2c, 0x3b, 0xd2, 0x6e, 0x3c,
+ 0xe8, 0x51, 0x56, 0xce, 0x6d, 0xe4, 0x36, 0x8b, 0x5b, 0xdb, 0xb5, 0xac, 0xd9, 0x58, 0xe3, 0x76,
+ 0xe9, 0x9c, 0xf7, 0xc8, 0x70, 0x3b, 0x8f, 0x6c, 0xea, 0xa1, 0x59, 0xe3, 0x75, 0xe9, 0xf7, 0x65,
+ 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x14, 0xb8, 0x46, 0x9f, 0x6a, 0xbd, 0x81, 0x4e, 0x63,
+ 0x74, 0xe5, 0xb9, 0xcb, 0x3a, 0xc7, 0x0d, 0x79, 0x8e, 0x6b, 0x77, 0x52, 0xd4, 0xe0, 0x54, 0xe5,
+ 0xe8, 0x63, 0x28, 0xf6, 0x79, 0x48, 0xec, 0x5b, 0x3d, 0x43, 0x3b, 0x2e, 0x2f, 0x8a, 0x40, 0x52,
+ 0xc7, 0xa3, 0x6a, 0x71, 0x37, 0x04, 0x9f, 0x8e, 0xaa, 0xab, 0x91, 0xcf, 0x4f, 0x8f, 0x6d, 0x8a,
+ 0xa3, 0x6c, 0xea, 0x1f, 0xf3, 0xb0, 0xba, 0x3b, 0xe0, 0xe9, 0x69, 0xb6, 0x8f, 0x68, 0xb3, 0x63,
+ 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x04, 0x96, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xc7, 0x32, 0x5b, 0x46,
+ 0x5b, 0x06, 0xc0, 0xb7, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x89, 0x08, 0x69, 0x5c, 0x93, 0x8a, 0x96,
+ 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x79, 0x27, 0x12, 0x02, 0x1f, 0x64, 0xd1, 0x58, 0x4b,
+ 0x71, 0xf8, 0xb2, 0xd4, 0x35, 0xef, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc2, 0x72, 0x8b, 0x18, 0xbd,
+ 0x81, 0x43, 0xa5, 0x53, 0xe7, 0x84, 0x07, 0xbe, 0xce, 0x23, 0xe4, 0x6e, 0x14, 0x71, 0x3a, 0xaa,
+ 0x96, 0x62, 0x00, 0xe1, 0xd8, 0x38, 0x73, 0xf2, 0x82, 0x0a, 0x17, 0xba, 0xa0, 0xf4, 0x3c, 0x9f,
+ 0xff, 0xff, 0xe4, 0x79, 0xf1, 0xe5, 0xe6, 0xf9, 0xc7, 0x50, 0x64, 0x86, 0x4e, 0xef, 0xb4, 0x5a,
+ 0x54, 0x73, 0x59, 0x79, 0x21, 0x74, 0xd8, 0x41, 0x08, 0xe6, 0x0e, 0x0b, 0x3f, 0x77, 0x7a, 0x84,
+ 0x31, 0x1c, 0x65, 0x43, 0xb7, 0x61, 0x85, 0x77, 0x25, 0x6b, 0xe0, 0x1e, 0x50, 0xcd, 0x32, 0x75,
+ 0x26, 0x52, 0x63, 0xde, 0x3b, 0xc1, 0xa7, 0x31, 0x0c, 0x4e, 0x50, 0xa2, 0xcf, 0x60, 0x3d, 0x88,
+ 0x22, 0x4c, 0x87, 0x06, 0x7d, 0x72, 0x48, 0x1d, 0xfe, 0xc1, 0xca, 0xf9, 0x8d, 0xdc, 0x66, 0xa1,
+ 0xf1, 0xe6, 0x78, 0x54, 0x5d, 0xdf, 0x4e, 0x27, 0xc1, 0xd3, 0x78, 0xd1, 0x63, 0x40, 0x0e, 0x35,
+ 0xcc, 0xa1, 0xa5, 0x89, 0xf0, 0x93, 0x01, 0x01, 0xc2, 0xbe, 0x77, 0xc6, 0xa3, 0x2a, 0xc2, 0x13,
+ 0xd8, 0xd3, 0x51, 0xf5, 0x6b, 0x93, 0x50, 0x11, 0x1e, 0x29, 0xb2, 0xd0, 0x4f, 0x60, 0xb5, 0x1f,
+ 0x6b, 0x44, 0xac, 0xbc, 0x24, 0x32, 0xe4, 0xc3, 0xec, 0x39, 0x19, 0xef, 0x64, 0x61, 0xcf, 0x8d,
+ 0xc3, 0x19, 0x4e, 0x6a, 0x52, 0xff, 0xa2, 0xc0, 0x8d, 0x44, 0x0d, 0xf1, 0xd2, 0x75, 0xe0, 0x69,
+ 0x40, 0x8f, 0x21, 0xcf, 0xa3, 0x42, 0x27, 0x2e, 0x91, 0x2d, 0xea, 0x9d, 0x6c, 0x31, 0xe4, 0x05,
+ 0xcc, 0x2e, 0x75, 0x49, 0xd8, 0x22, 0x43, 0x18, 0x0e, 0xa4, 0xa2, 0x1f, 0x40, 0x5e, 0x6a, 0x66,
+ 0xe5, 0x59, 0x61, 0xf8, 0xb7, 0xce, 0x61, 0x78, 0xfc, 0xec, 0x8d, 0x39, 0xae, 0x0a, 0x07, 0x02,
+ 0xd5, 0x7f, 0x28, 0xb0, 0xf1, 0x22, 0xfb, 0x1e, 0x1a, 0xcc, 0x45, 0x9f, 0x4f, 0xd8, 0x58, 0xcb,
+ 0x98, 0x27, 0x06, 0xf3, 0x2c, 0x0c, 0x66, 0x12, 0x1f, 0x12, 0xb1, 0xaf, 0x0b, 0xf3, 0x86, 0x4b,
+ 0xfb, 0xbe, 0x71, 0x77, 0x2f, 0x6c, 0x5c, 0xec, 0xe0, 0x61, 0x19, 0xbc, 0xcf, 0x85, 0x63, 0x4f,
+ 0x87, 0xfa, 0x5c, 0x81, 0xf5, 0x29, 0x9d, 0x0a, 0x7d, 0x10, 0xf6, 0x62, 0x51, 0x44, 0xca, 0x8a,
+ 0xc8, 0x8b, 0x52, 0xb4, 0x89, 0x0a, 0x04, 0x8e, 0xd3, 0xa1, 0x5f, 0x28, 0x80, 0x9c, 0x09, 0x79,
+ 0xb2, 0x73, 0x5c, 0xb8, 0x8e, 0x5f, 0x97, 0x06, 0xa0, 0x49, 0x1c, 0x4e, 0x51, 0xa7, 0x12, 0x28,
+ 0xec, 0x13, 0x87, 0xf4, 0x3f, 0x31, 0x4c, 0x9d, 0x4f, 0x62, 0xc4, 0x36, 0x64, 0x96, 0xca, 0x6e,
+ 0x17, 0x84, 0xd9, 0xf6, 0xfe, 0x7d, 0x89, 0xc1, 0x11, 0x2a, 0xde, 0x1b, 0xbb, 0x86, 0xa9, 0xcb,
+ 0xb9, 0x2d, 0xe8, 0x8d, 0x5c, 0x1e, 0x16, 0x18, 0xf5, 0x77, 0xb3, 0x90, 0x17, 0x3a, 0xf8, 0x2c,
+ 0x79, 0x76, 0x2b, 0xad, 0x43, 0x21, 0x28, 0xbd, 0x52, 0x6a, 0x49, 0x92, 0x15, 0x82, 0x32, 0x8d,
+ 0x43, 0x1a, 0xf4, 0x05, 0xe4, 0x99, 0x5f, 0x90, 0x73, 0x17, 0x2f, 0xc8, 0x4b, 0x3c, 0xd2, 0x82,
+ 0x52, 0x1c, 0x88, 0x44, 0x2e, 0xac, 0xdb, 0xfc, 0xf4, 0xd4, 0xa5, 0xce, 0x9e, 0xe5, 0xde, 0xb5,
+ 0x06, 0xa6, 0xbe, 0xad, 0x71, 0xef, 0xc9, 0x6e, 0x78, 0x9b, 0x97, 0xc0, 0xfd, 0x74, 0x92, 0xd3,
+ 0x51, 0xf5, 0xcd, 0x29, 0x28, 0x51, 0xba, 0xa6, 0x89, 0x56, 0x7f, 0xab, 0xc0, 0xda, 0x01, 0x75,
+ 0x86, 0x86, 0x46, 0x31, 0x6d, 0x51, 0x87, 0x9a, 0x5a, 0xc2, 0x35, 0x4a, 0x06, 0xd7, 0xf8, 0xde,
+ 0x9e, 0x9d, 0xea, 0xed, 0x1b, 0x30, 0x67, 0x13, 0xb7, 0x23, 0x07, 0xfb, 0x3c, 0xc7, 0xee, 0x13,
+ 0xb7, 0x83, 0x05, 0x54, 0x60, 0x2d, 0xc7, 0x15, 0x86, 0xce, 0x4b, 0xac, 0xe5, 0xb8, 0x58, 0x40,
+ 0xd5, 0x5f, 0x2b, 0xb0, 0xc4, 0xad, 0xd8, 0xe9, 0x50, 0xad, 0xcb, 0x9f, 0x15, 0x5f, 0x2a, 0x80,
+ 0x68, 0xf2, 0xb1, 0xe1, 0x65, 0x44, 0x71, 0xeb, 0xa3, 0xec, 0x29, 0x3a, 0xf1, 0x60, 0x09, 0xc3,
+ 0x7a, 0x02, 0xc5, 0x70, 0x8a, 0x4a, 0xf5, 0xcf, 0xb3, 0xf0, 0xc6, 0x21, 0xe9, 0x19, 0xba, 0x48,
+ 0xf5, 0xa0, 0x3f, 0xc9, 0xe6, 0xf0, 0xf2, 0xcb, 0xaf, 0x01, 0x73, 0xcc, 0xa6, 0x9a, 0xcc, 0xe6,
+ 0x7b, 0xd9, 0x4d, 0x9f, 0x7a, 0xe8, 0x03, 0x9b, 0x6a, 0xe1, 0x0d, 0xf2, 0x2f, 0x2c, 0x54, 0xa0,
+ 0x1f, 0xc1, 0x02, 0x73, 0x89, 0x3b, 0x60, 0x32, 0xf8, 0xef, 0x5f, 0x86, 0x32, 0x21, 0xb0, 0xb1,
+ 0x22, 0xd5, 0x2d, 0x78, 0xdf, 0x58, 0x2a, 0x52, 0xff, 0xad, 0xc0, 0xc6, 0x54, 0xde, 0x86, 0x61,
+ 0xea, 0x3c, 0x18, 0x5e, 0xbe, 0x93, 0xed, 0x98, 0x93, 0xf7, 0x2e, 0xc1, 0x6e, 0x79, 0xf6, 0x69,
+ 0xbe, 0x56, 0xff, 0xa5, 0xc0, 0xdb, 0x67, 0x31, 0x5f, 0x41, 0xf3, 0xb3, 0xe2, 0xcd, 0xef, 0xc1,
+ 0xe5, 0x59, 0x3e, 0xa5, 0x01, 0x7e, 0x99, 0x3b, 0xdb, 0x6e, 0xee, 0x26, 0xde, 0x41, 0x6c, 0x01,
+ 0xdc, 0x0b, 0x8b, 0x7c, 0x70, 0x89, 0xfb, 0x01, 0x06, 0x47, 0xa8, 0xb8, 0xaf, 0x6c, 0xd9, 0x1e,
+ 0xe4, 0x55, 0x6e, 0x65, 0x37, 0xc8, 0x6f, 0x2c, 0x5e, 0xf9, 0xf6, 0xbf, 0x70, 0x20, 0x11, 0xb9,
+ 0xb0, 0xd2, 0x8f, 0x2d, 0x0a, 0x64, 0x9a, 0x9c, 0x77, 0x0e, 0x0c, 0xf8, 0xbd, 0xb9, 0x39, 0x0e,
+ 0xc3, 0x09, 0x1d, 0xe8, 0x08, 0x4a, 0x43, 0xe9, 0x2f, 0xcb, 0xf4, 0x4a, 0xba, 0xf7, 0x3a, 0x2e,
+ 0x34, 0x6e, 0xf2, 0xf7, 0xc6, 0x61, 0x12, 0x79, 0x3a, 0xaa, 0xae, 0x25, 0x81, 0x78, 0x52, 0x86,
+ 0xfa, 0x77, 0x05, 0xde, 0x9a, 0x7a, 0x13, 0x57, 0x10, 0x7a, 0x9d, 0x78, 0xe8, 0xed, 0x5c, 0x46,
+ 0xe8, 0xa5, 0xc7, 0xdc, 0x6f, 0x16, 0x5e, 0x60, 0xa9, 0x08, 0xb6, 0xc7, 0x50, 0xb0, 0xfd, 0xd9,
+ 0x25, 0x65, 0xd3, 0x93, 0x25, 0x72, 0x38, 0x6b, 0x63, 0x99, 0xf7, 0xcf, 0xe0, 0x13, 0x87, 0x42,
+ 0xd1, 0x8f, 0x61, 0xcd, 0x9f, 0xed, 0x39, 0xbf, 0x61, 0xba, 0xfe, 0x80, 0x76, 0xf1, 0xf0, 0xb9,
+ 0x36, 0x1e, 0x55, 0xd7, 0x76, 0x13, 0x52, 0xf1, 0x84, 0x1e, 0xd4, 0x85, 0x62, 0x78, 0xfd, 0xfe,
+ 0xfb, 0xfe, 0xbd, 0xf3, 0xfb, 0xdb, 0x32, 0x1b, 0xaf, 0x49, 0x07, 0x17, 0x43, 0x18, 0xc3, 0x51,
+ 0xe9, 0x97, 0xfc, 0xd0, 0xff, 0x19, 0xac, 0x91, 0xf8, 0xa2, 0x93, 0x95, 0xe7, 0xcf, 0xfb, 0x08,
+ 0x49, 0xac, 0x4a, 0x1b, 0x65, 0x69, 0xc4, 0x5a, 0x02, 0xc1, 0xf0, 0x84, 0xb2, 0xb4, 0xd7, 0xdf,
+ 0xc2, 0x55, 0xbd, 0xfe, 0x90, 0x06, 0x85, 0x21, 0x71, 0x0c, 0xd2, 0xec, 0x51, 0xfe, 0xd4, 0xce,
+ 0x9d, 0xaf, 0xa0, 0x1d, 0x4a, 0xd6, 0x70, 0xb2, 0xf3, 0x21, 0x0c, 0x87, 0x72, 0xd5, 0x3f, 0xcc,
+ 0x42, 0xf5, 0x8c, 0xf6, 0x8d, 0x1e, 0x00, 0xb2, 0x9a, 0x8c, 0x3a, 0x43, 0xaa, 0xdf, 0xf3, 0x56,
+ 0xd1, 0xfe, 0x58, 0x9f, 0x0b, 0x07, 0xaa, 0x47, 0x13, 0x14, 0x38, 0x85, 0x0b, 0xf5, 0x60, 0xc9,
+ 0x8d, 0x8c, 0x7a, 0x32, 0x0b, 0xde, 0xcf, 0x6e, 0x57, 0x74, 0x50, 0x6c, 0xac, 0x8d, 0x47, 0xd5,
+ 0xd8, 0xe8, 0x88, 0x63, 0xd2, 0x91, 0x06, 0xa0, 0x85, 0x57, 0xe7, 0x85, 0x7e, 0x3d, 0x5b, 0x15,
+ 0x0b, 0x6f, 0x2c, 0xe8, 0x3b, 0x91, 0xcb, 0x8a, 0x88, 0x55, 0x4f, 0x16, 0xa1, 0x14, 0xba, 0xf0,
+ 0xd5, 0xae, 0xef, 0xd5, 0xae, 0xef, 0x85, 0xbb, 0x3e, 0x78, 0xb5, 0xeb, 0xbb, 0xd0, 0xae, 0x2f,
+ 0xa5, 0x16, 0x17, 0xaf, 0x6c, 0x13, 0x77, 0xa2, 0x40, 0x65, 0x22, 0xc7, 0xaf, 0x7a, 0x17, 0xf7,
+ 0xc5, 0xc4, 0x2e, 0xee, 0xa3, 0x8b, 0x8c, 0x4d, 0xd3, 0xb6, 0x71, 0xff, 0x54, 0x40, 0x7d, 0xb1,
+ 0x8d, 0x57, 0x30, 0x17, 0xf6, 0xe3, 0x73, 0xe1, 0x77, 0xff, 0x07, 0x03, 0xb3, 0x6c, 0xe4, 0xfe,
+ 0xa3, 0x00, 0x84, 0xc3, 0x0c, 0x7a, 0x1b, 0x22, 0x3f, 0x14, 0xca, 0xd2, 0xed, 0xb9, 0x29, 0x02,
+ 0x47, 0x37, 0x61, 0xb1, 0x4f, 0x19, 0x23, 0x6d, 0x7f, 0x21, 0x12, 0xfc, 0x8e, 0xb9, 0xeb, 0x81,
+ 0xb1, 0x8f, 0x47, 0x47, 0xb0, 0xe0, 0x50, 0xc2, 0x2c, 0x53, 0x2e, 0x46, 0xbe, 0xc3, 0x5f, 0xc1,
+ 0x58, 0x40, 0x4e, 0x47, 0xd5, 0x5b, 0x59, 0x7e, 0x67, 0xae, 0xc9, 0x47, 0xb3, 0x60, 0xc2, 0x52,
+ 0x1c, 0xba, 0x07, 0x25, 0xa9, 0x23, 0x72, 0x60, 0xaf, 0xd2, 0xbe, 0x21, 0x4f, 0x53, 0xda, 0x4d,
+ 0x12, 0xe0, 0x49, 0x1e, 0xf5, 0x01, 0xe4, 0xfd, 0xc1, 0x00, 0x95, 0x61, 0x2e, 0xf2, 0xde, 0xf2,
+ 0x0c, 0x17, 0x90, 0x84, 0x63, 0x66, 0xd3, 0x1d, 0xa3, 0xfe, 0x5e, 0x81, 0xd7, 0x52, 0x9a, 0x12,
+ 0x7a, 0x03, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0xb0, 0x38, 0x1e, 0x55, 0x73, 0x9f, 0xe1, 0x87, 0x98,
+ 0xc3, 0x10, 0x81, 0x45, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xce, 0x7e, 0xe3, 0xc9, 0xbd, 0x56,
+ 0xa3, 0xc8, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x26, 0xe4, 0x35, 0xd2, 0x18, 0x98, 0x7a, 0xcf,
+ 0xbb, 0xaf, 0x25, 0xef, 0x8d, 0xb7, 0xb3, 0xed, 0xc1, 0x70, 0x80, 0x6d, 0xec, 0x3d, 0x3b, 0xa9,
+ 0xcc, 0x7c, 0x75, 0x52, 0x99, 0x79, 0x7e, 0x52, 0x99, 0xf9, 0xf9, 0xb8, 0xa2, 0x3c, 0x1b, 0x57,
+ 0x94, 0xaf, 0xc6, 0x15, 0xe5, 0xf9, 0xb8, 0xa2, 0xfc, 0x75, 0x5c, 0x51, 0x7e, 0xf9, 0xb7, 0xca,
+ 0xcc, 0xf7, 0x37, 0xb3, 0xfe, 0x97, 0xc3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x71, 0x54, 0x54,
+ 0xe6, 0x29, 0x21, 0x00, 0x00,
+}
+
+func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuditAnnotation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuditAnnotation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.ValueExpression)
+ copy(dAtA[i:], m.ValueExpression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValueExpression)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ExpressionWarning) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExpressionWarning) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExpressionWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Warning)
+ copy(dAtA[i:], m.Warning)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Warning)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.FieldRef)
+ copy(dAtA[i:], m.FieldRef)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldRef)))
+ i--
+ dAtA[i] = 0x12
+ return len(dAtA) - i, nil
}
func (m *MatchCondition) Marshal() (dAtA []byte, err error) {
@@ -416,6 +1005,88 @@ func (m *MatchCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *MatchResources) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MatchResources) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MatchResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.MatchPolicy != nil {
+ i -= len(*m.MatchPolicy)
+ copy(dAtA[i:], *m.MatchPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.ExcludeResourceRules) > 0 {
+ for iNdEx := len(m.ExcludeResourceRules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ExcludeResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.ResourceRules) > 0 {
+ for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.ObjectSelector != nil {
+ {
+ size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.NamespaceSelector != nil {
+ {
+ size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -642,7 +1313,7 @@ func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (in
return len(dAtA) - i, nil
}
-func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
+func (m *NamedRuleWithOperations) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -652,42 +1323,72 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
+func (m *NamedRuleWithOperations) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *NamedRuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if m.Port != nil {
- i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
- i--
- dAtA[i] = 0x20
+ {
+ size, err := m.RuleWithOperations.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if m.Path != nil {
- i -= len(*m.Path)
- copy(dAtA[i:], *m.Path)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path)))
- i--
- dAtA[i] = 0x1a
+ i--
+ dAtA[i] = 0x12
+ if len(m.ResourceNames) > 0 {
+ for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ResourceNames[iNdEx])
+ copy(dAtA[i:], m.ResourceNames[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
}
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ return len(dAtA) - i, nil
+}
+
+func (m *ParamKind) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ParamKind) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ParamKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
i--
dAtA[i] = 0x12
- i -= len(m.Namespace)
- copy(dAtA[i:], m.Namespace)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i -= len(m.APIVersion)
+ copy(dAtA[i:], m.APIVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
+func (m *ParamRef) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -697,33 +1398,26 @@ func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+func (m *ParamRef) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ParamRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.MatchConditions) > 0 {
- for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
+ if m.ParameterNotFoundAction != nil {
+ i -= len(*m.ParameterNotFoundAction)
+ copy(dAtA[i:], *m.ParameterNotFoundAction)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ParameterNotFoundAction)))
+ i--
+ dAtA[i] = 0x22
}
- if m.ObjectSelector != nil {
+ if m.Selector != nil {
{
- size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -731,59 +1425,90 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x52
- }
- if m.MatchPolicy != nil {
- i -= len(*m.MatchPolicy)
- copy(dAtA[i:], *m.MatchPolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
- i--
- dAtA[i] = 0x4a
- }
- if len(m.AdmissionReviewVersions) > 0 {
- for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.AdmissionReviewVersions[iNdEx])
- copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
- i--
- dAtA[i] = 0x42
- }
- }
- if m.TimeoutSeconds != nil {
- i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
- i--
- dAtA[i] = 0x38
+ dAtA[i] = 0x1a
}
- if m.SideEffects != nil {
- i -= len(*m.SideEffects)
- copy(dAtA[i:], *m.SideEffects)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
- i--
- dAtA[i] = 0x32
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- if m.NamespaceSelector != nil {
- {
- size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Port != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
i--
- dAtA[i] = 0x2a
+ dAtA[i] = 0x20
}
- if m.FailurePolicy != nil {
- i -= len(*m.FailurePolicy)
- copy(dAtA[i:], *m.FailurePolicy)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ if m.Path != nil {
+ i -= len(*m.Path)
+ copy(dAtA[i:], *m.Path)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path)))
i--
- dAtA[i] = 0x22
+ dAtA[i] = 0x1a
}
- if len(m.Rules) > 0 {
- for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Namespace)
+ copy(dAtA[i:], m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *TypeChecking) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TypeChecking) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TypeChecking) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ExpressionWarnings) > 0 {
+ for iNdEx := len(m.ExpressionWarnings) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.ExpressionWarnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -791,11 +1516,44 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x1a
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ValidatingAdmissionPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x1a
{
- size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -804,15 +1562,20 @@ func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
}
i--
dAtA[i] = 0x12
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBinding) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -822,30 +1585,26 @@ func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBinding) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Webhooks) > 0 {
- for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x12
{
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -859,7 +1618,7 @@ func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int,
return len(dAtA) - i, nil
}
-func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBindingList) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -869,12 +1628,12 @@ func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error)
return dAtA[:n], nil
}
-func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingList) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -906,7 +1665,7 @@ func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (
return len(dAtA) - i, nil
}
-func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -916,33 +1675,40 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *ValidatingAdmissionPolicyBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if m.URL != nil {
- i -= len(*m.URL)
- copy(dAtA[i:], *m.URL)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL)))
- i--
- dAtA[i] = 0x1a
+ if len(m.ValidationActions) > 0 {
+ for iNdEx := len(m.ValidationActions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ValidationActions[iNdEx])
+ copy(dAtA[i:], m.ValidationActions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ValidationActions[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
}
- if m.CABundle != nil {
- i -= len(m.CABundle)
- copy(dAtA[i:], m.CABundle)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
+ if m.MatchResources != nil {
+ {
+ size, err := m.MatchResources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
i--
- dAtA[i] = 0x12
+ dAtA[i] = 0x1a
}
- if m.Service != nil {
+ if m.ParamRef != nil {
{
- size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.ParamRef.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -950,432 +1716,3359 @@ func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0xa
+ dAtA[i] = 0x12
}
+ i -= len(m.PolicyName)
+ copy(dAtA[i:], m.PolicyName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyName)))
+ i--
+ dAtA[i] = 0xa
return len(dAtA) - i, nil
}
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
+func (m *ValidatingAdmissionPolicyList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- dAtA[offset] = uint8(v)
- return base
+ return dAtA[:n], nil
}
-func (m *MatchCondition) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Expression)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+
+func (m *ValidatingAdmissionPolicyList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *MutatingWebhook) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingAdmissionPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.ClientConfig.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Rules) > 0 {
- for _, e := range m.Rules {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
}
- if m.FailurePolicy != nil {
- l = len(*m.FailurePolicy)
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if m.NamespaceSelector != nil {
- l = m.NamespaceSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.SideEffects != nil {
- l = len(*m.SideEffects)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.TimeoutSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
- }
- if len(m.AdmissionReviewVersions) > 0 {
- for _, s := range m.AdmissionReviewVersions {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if m.MatchPolicy != nil {
- l = len(*m.MatchPolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ReinvocationPolicy != nil {
- l = len(*m.ReinvocationPolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ObjectSelector != nil {
- l = m.ObjectSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if len(m.MatchConditions) > 0 {
- for _, e := range m.MatchConditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *MutatingWebhookConfiguration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Webhooks) > 0 {
- for _, e := range m.Webhooks {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
+func (m *ValidatingAdmissionPolicySpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- return n
+ return dAtA[:n], nil
}
-func (m *MutatingWebhookConfigurationList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
+func (m *ValidatingAdmissionPolicySpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ServiceReference) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingAdmissionPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Namespace)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- if m.Path != nil {
- l = len(*m.Path)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.Port != nil {
- n += 1 + sovGenerated(uint64(*m.Port))
+ if len(m.Variables) > 0 {
+ for iNdEx := len(m.Variables) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Variables[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
}
- return n
-}
-
-func (m *ValidatingWebhook) Size() (n int) {
- if m == nil {
- return 0
+ if len(m.MatchConditions) > 0 {
+ for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
}
- var l int
- _ = l
- l = len(m.Name)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.ClientConfig.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Rules) > 0 {
- for _, e := range m.Rules {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AuditAnnotations) > 0 {
+ for iNdEx := len(m.AuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.AuditAnnotations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
}
}
if m.FailurePolicy != nil {
- l = len(*m.FailurePolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.NamespaceSelector != nil {
- l = m.NamespaceSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.SideEffects != nil {
- l = len(*m.SideEffects)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.TimeoutSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ i -= len(*m.FailurePolicy)
+ copy(dAtA[i:], *m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ i--
+ dAtA[i] = 0x22
}
- if len(m.AdmissionReviewVersions) > 0 {
- for _, s := range m.AdmissionReviewVersions {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Validations) > 0 {
+ for iNdEx := len(m.Validations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Validations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
}
- if m.MatchPolicy != nil {
- l = len(*m.MatchPolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ObjectSelector != nil {
- l = m.ObjectSelector.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if m.MatchConstraints != nil {
+ {
+ size, err := m.MatchConstraints.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
- if len(m.MatchConditions) > 0 {
- for _, e := range m.MatchConditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if m.ParamKind != nil {
+ {
+ size, err := m.ParamKind.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0xa
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *ValidatingWebhookConfiguration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Webhooks) > 0 {
- for _, e := range m.Webhooks {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
+func (m *ValidatingAdmissionPolicyStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- return n
+ return dAtA[:n], nil
}
-func (m *ValidatingWebhookConfigurationList) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *ValidatingAdmissionPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingAdmissionPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
}
- return n
+ if m.TypeChecking != nil {
+ {
+ size, err := m.TypeChecking.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
}
-func (m *WebhookClientConfig) Size() (n int) {
- if m == nil {
- return 0
+func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if m.Service != nil {
- l = m.Service.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.CABundle != nil {
- l = len(m.CABundle)
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.MatchConditions) > 0 {
+ for iNdEx := len(m.MatchConditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.MatchConditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
}
- if m.URL != nil {
- l = len(*m.URL)
- n += 1 + l + sovGenerated(uint64(l))
+ if m.ObjectSelector != nil {
+ {
+ size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
}
- return n
-}
-
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *MatchCondition) String() string {
- if this == nil {
- return "nil"
+ if m.MatchPolicy != nil {
+ i -= len(*m.MatchPolicy)
+ copy(dAtA[i:], *m.MatchPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy)))
+ i--
+ dAtA[i] = 0x4a
}
- s := strings.Join([]string{`&MatchCondition{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MutatingWebhook) String() string {
- if this == nil {
- return "nil"
+ if len(m.AdmissionReviewVersions) > 0 {
+ for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.AdmissionReviewVersions[iNdEx])
+ copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
}
- repeatedStringForRules := "[]RuleWithOperations{"
- for _, f := range this.Rules {
- repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+ if m.TimeoutSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+ i--
+ dAtA[i] = 0x38
}
- repeatedStringForRules += "}"
- repeatedStringForMatchConditions := "[]MatchCondition{"
- for _, f := range this.MatchConditions {
- repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ if m.SideEffects != nil {
+ i -= len(*m.SideEffects)
+ copy(dAtA[i:], *m.SideEffects)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
+ i--
+ dAtA[i] = 0x32
}
- repeatedStringForMatchConditions += "}"
- s := strings.Join([]string{`&MutatingWebhook{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
- `Rules:` + repeatedStringForRules + `,`,
- `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
- `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
- `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
- `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
- `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
- `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
- `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
- `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
- `MatchConditions:` + repeatedStringForMatchConditions + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MutatingWebhookConfiguration) String() string {
- if this == nil {
- return "nil"
+ if m.NamespaceSelector != nil {
+ {
+ size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
}
- repeatedStringForWebhooks := "[]MutatingWebhook{"
- for _, f := range this.Webhooks {
- repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + ","
+ if m.FailurePolicy != nil {
+ i -= len(*m.FailurePolicy)
+ copy(dAtA[i:], *m.FailurePolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy)))
+ i--
+ dAtA[i] = 0x22
}
- repeatedStringForWebhooks += "}"
- s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Webhooks:` + repeatedStringForWebhooks + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MutatingWebhookConfigurationList) String() string {
- if this == nil {
- return "nil"
+ if len(m.Rules) > 0 {
+ for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
}
- repeatedStringForItems := "[]MutatingWebhookConfiguration{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+ {
+ size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (this *ServiceReference) String() string {
- if this == nil {
- return "nil"
+
+func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- s := strings.Join([]string{`&ServiceReference{`,
- `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Path:` + valueToStringGenerated(this.Path) + `,`,
- `Port:` + valueToStringGenerated(this.Port) + `,`,
- `}`,
- }, "")
- return s
+ return dAtA[:n], nil
}
-func (this *ValidatingWebhook) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForRules := "[]RuleWithOperations{"
- for _, f := range this.Rules {
- repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+
+func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Webhooks) > 0 {
+ for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
}
- repeatedStringForRules += "}"
- repeatedStringForMatchConditions := "[]MatchCondition{"
- for _, f := range this.MatchConditions {
- repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- repeatedStringForMatchConditions += "}"
- s := strings.Join([]string{`&ValidatingWebhook{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Validation) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Validation) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Validation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.MessageExpression)
+ copy(dAtA[i:], m.MessageExpression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
+ i--
+ dAtA[i] = 0x22
+ if m.Reason != nil {
+ i -= len(*m.Reason)
+ copy(dAtA[i:], *m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Variable) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Variable) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Variable) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Expression)
+ copy(dAtA[i:], m.Expression)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.URL != nil {
+ i -= len(*m.URL)
+ copy(dAtA[i:], *m.URL)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CABundle != nil {
+ i -= len(m.CABundle)
+ copy(dAtA[i:], m.CABundle)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Service != nil {
+ {
+ size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *AuditAnnotation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ValueExpression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ExpressionWarning) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.FieldRef)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Warning)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *MatchCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *MatchResources) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NamespaceSelector != nil {
+ l = m.NamespaceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ObjectSelector != nil {
+ l = m.ObjectSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ResourceRules) > 0 {
+ for _, e := range m.ResourceRules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.ExcludeResourceRules) > 0 {
+ for _, e := range m.ExcludeResourceRules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.MatchPolicy != nil {
+ l = len(*m.MatchPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *MutatingWebhook) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ClientConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.FailurePolicy != nil {
+ l = len(*m.FailurePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NamespaceSelector != nil {
+ l = m.NamespaceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SideEffects != nil {
+ l = len(*m.SideEffects)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.TimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ }
+ if len(m.AdmissionReviewVersions) > 0 {
+ for _, s := range m.AdmissionReviewVersions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.MatchPolicy != nil {
+ l = len(*m.MatchPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ReinvocationPolicy != nil {
+ l = len(*m.ReinvocationPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ObjectSelector != nil {
+ l = m.ObjectSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.MatchConditions) > 0 {
+ for _, e := range m.MatchConditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MutatingWebhookConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Webhooks) > 0 {
+ for _, e := range m.Webhooks {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MutatingWebhookConfigurationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NamedRuleWithOperations) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ResourceNames) > 0 {
+ for _, s := range m.ResourceNames {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.RuleWithOperations.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ParamKind) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.APIVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ParamRef) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Selector != nil {
+ l = m.Selector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ParameterNotFoundAction != nil {
+ l = len(*m.ParameterNotFoundAction)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *ServiceReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Path != nil {
+ l = len(*m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Port != nil {
+ n += 1 + sovGenerated(uint64(*m.Port))
+ }
+ return n
+}
+
+func (m *TypeChecking) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ExpressionWarnings) > 0 {
+ for _, e := range m.ExpressionWarnings {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyBinding) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyBindingList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyBindingSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.PolicyName)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ParamRef != nil {
+ l = m.ParamRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MatchResources != nil {
+ l = m.MatchResources.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.ValidationActions) > 0 {
+ for _, s := range m.ValidationActions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicySpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ParamKind != nil {
+ l = m.ParamKind.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.MatchConstraints != nil {
+ l = m.MatchConstraints.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Validations) > 0 {
+ for _, e := range m.Validations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.FailurePolicy != nil {
+ l = len(*m.FailurePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.AuditAnnotations) > 0 {
+ for _, e := range m.AuditAnnotations {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.MatchConditions) > 0 {
+ for _, e := range m.MatchConditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Variables) > 0 {
+ for _, e := range m.Variables {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingAdmissionPolicyStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+ if m.TypeChecking != nil {
+ l = m.TypeChecking.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingWebhook) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.ClientConfig.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.FailurePolicy != nil {
+ l = len(*m.FailurePolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NamespaceSelector != nil {
+ l = m.NamespaceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.SideEffects != nil {
+ l = len(*m.SideEffects)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.TimeoutSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+ }
+ if len(m.AdmissionReviewVersions) > 0 {
+ for _, s := range m.AdmissionReviewVersions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.MatchPolicy != nil {
+ l = len(*m.MatchPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ObjectSelector != nil {
+ l = m.ObjectSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.MatchConditions) > 0 {
+ for _, e := range m.MatchConditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingWebhookConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Webhooks) > 0 {
+ for _, e := range m.Webhooks {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ValidatingWebhookConfigurationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Validation) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Reason != nil {
+ l = len(*m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.MessageExpression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Variable) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Expression)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *WebhookClientConfig) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.CABundle != nil {
+ l = len(m.CABundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.URL != nil {
+ l = len(*m.URL)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AuditAnnotation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&AuditAnnotation{`,
+ `Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+ `ValueExpression:` + fmt.Sprintf("%v", this.ValueExpression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ExpressionWarning) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExpressionWarning{`,
+ `FieldRef:` + fmt.Sprintf("%v", this.FieldRef) + `,`,
+ `Warning:` + fmt.Sprintf("%v", this.Warning) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MatchCondition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MatchCondition{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MatchResources) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForResourceRules := "[]NamedRuleWithOperations{"
+ for _, f := range this.ResourceRules {
+ repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResourceRules += "}"
+ repeatedStringForExcludeResourceRules := "[]NamedRuleWithOperations{"
+ for _, f := range this.ExcludeResourceRules {
+ repeatedStringForExcludeResourceRules += strings.Replace(strings.Replace(f.String(), "NamedRuleWithOperations", "NamedRuleWithOperations", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExcludeResourceRules += "}"
+ s := strings.Join([]string{`&MatchResources{`,
+ `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `ResourceRules:` + repeatedStringForResourceRules + `,`,
+ `ExcludeResourceRules:` + repeatedStringForExcludeResourceRules + `,`,
+ `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingWebhook) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRules := "[]RuleWithOperations{"
+ for _, f := range this.Rules {
+ repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForRules += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ s := strings.Join([]string{`&MutatingWebhook{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
+ `Rules:` + repeatedStringForRules + `,`,
+ `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
+ `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+ `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
+ `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
+ `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`,
+ `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingWebhookConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForWebhooks := "[]MutatingWebhook{"
+ for _, f := range this.Webhooks {
+ repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForWebhooks += "}"
+ s := strings.Join([]string{`&MutatingWebhookConfiguration{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Webhooks:` + repeatedStringForWebhooks + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MutatingWebhookConfigurationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]MutatingWebhookConfiguration{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&MutatingWebhookConfigurationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NamedRuleWithOperations) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NamedRuleWithOperations{`,
+ `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
+ `RuleWithOperations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RuleWithOperations), "RuleWithOperations", "v11.RuleWithOperations", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ParamKind) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ParamKind{`,
+ `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ParamRef) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ParamRef{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `ParameterNotFoundAction:` + valueToStringGenerated(this.ParameterNotFoundAction) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceReference{`,
+ `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Path:` + valueToStringGenerated(this.Path) + `,`,
+ `Port:` + valueToStringGenerated(this.Port) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TypeChecking) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForExpressionWarnings := "[]ExpressionWarning{"
+ for _, f := range this.ExpressionWarnings {
+ repeatedStringForExpressionWarnings += strings.Replace(strings.Replace(f.String(), "ExpressionWarning", "ExpressionWarning", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExpressionWarnings += "}"
+ s := strings.Join([]string{`&TypeChecking{`,
+ `ExpressionWarnings:` + repeatedStringForExpressionWarnings + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicy{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicySpec", "ValidatingAdmissionPolicySpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ValidatingAdmissionPolicyStatus", "ValidatingAdmissionPolicyStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBinding) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBinding{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ValidatingAdmissionPolicyBindingSpec", "ValidatingAdmissionPolicyBindingSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBindingList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ValidatingAdmissionPolicyBinding{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicyBinding", "ValidatingAdmissionPolicyBinding", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyBindingSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyBindingSpec{`,
+ `PolicyName:` + fmt.Sprintf("%v", this.PolicyName) + `,`,
+ `ParamRef:` + strings.Replace(this.ParamRef.String(), "ParamRef", "ParamRef", 1) + `,`,
+ `MatchResources:` + strings.Replace(this.MatchResources.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `ValidationActions:` + fmt.Sprintf("%v", this.ValidationActions) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ValidatingAdmissionPolicy{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingAdmissionPolicy", "ValidatingAdmissionPolicy", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicySpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForValidations := "[]Validation{"
+ for _, f := range this.Validations {
+ repeatedStringForValidations += strings.Replace(strings.Replace(f.String(), "Validation", "Validation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForValidations += "}"
+ repeatedStringForAuditAnnotations := "[]AuditAnnotation{"
+ for _, f := range this.AuditAnnotations {
+ repeatedStringForAuditAnnotations += strings.Replace(strings.Replace(f.String(), "AuditAnnotation", "AuditAnnotation", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForAuditAnnotations += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ repeatedStringForVariables := "[]Variable{"
+ for _, f := range this.Variables {
+ repeatedStringForVariables += strings.Replace(strings.Replace(f.String(), "Variable", "Variable", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForVariables += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicySpec{`,
+ `ParamKind:` + strings.Replace(this.ParamKind.String(), "ParamKind", "ParamKind", 1) + `,`,
+ `MatchConstraints:` + strings.Replace(this.MatchConstraints.String(), "MatchResources", "MatchResources", 1) + `,`,
+ `Validations:` + repeatedStringForValidations + `,`,
+ `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
+ `AuditAnnotations:` + repeatedStringForAuditAnnotations + `,`,
+ `MatchConditions:` + repeatedStringForMatchConditions + `,`,
+ `Variables:` + repeatedStringForVariables + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingAdmissionPolicyStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&ValidatingAdmissionPolicyStatus{`,
+ `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+ `TypeChecking:` + strings.Replace(this.TypeChecking.String(), "TypeChecking", "TypeChecking", 1) + `,`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingWebhook) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForRules := "[]RuleWithOperations{"
+ for _, f := range this.Rules {
+ repeatedStringForRules += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForRules += "}"
+ repeatedStringForMatchConditions := "[]MatchCondition{"
+ for _, f := range this.MatchConditions {
+ repeatedStringForMatchConditions += strings.Replace(strings.Replace(f.String(), "MatchCondition", "MatchCondition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForMatchConditions += "}"
+ s := strings.Join([]string{`&ValidatingWebhook{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`,
`Rules:` + repeatedStringForRules + `,`,
`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
- `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
+ `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
`MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`,
- `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
+ `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
`MatchConditions:` + repeatedStringForMatchConditions + `,`,
`}`,
}, "")
return s
}
-func (this *ValidatingWebhookConfiguration) String() string {
- if this == nil {
- return "nil"
+func (this *ValidatingWebhookConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForWebhooks := "[]ValidatingWebhook{"
+ for _, f := range this.Webhooks {
+ repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForWebhooks += "}"
+ s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Webhooks:` + repeatedStringForWebhooks + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ValidatingWebhookConfigurationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ValidatingWebhookConfiguration{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Validation) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Validation{`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Reason:` + valueToStringGenerated(this.Reason) + `,`,
+ `MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Variable) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Variable{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WebhookClientConfig) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WebhookClientConfig{`,
+ `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
+ `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
+ `URL:` + valueToStringGenerated(this.URL) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *AuditAnnotation) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuditAnnotation: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuditAnnotation: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ValueExpression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ValueExpression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExpressionWarning) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExpressionWarning: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExpressionWarning: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldRef = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Warning = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MatchCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Expression = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MatchResources) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MatchResources: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MatchResources: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NamespaceSelector == nil {
+ m.NamespaceSelector = &v1.LabelSelector{}
+ }
+ if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ObjectSelector == nil {
+ m.ObjectSelector = &v1.LabelSelector{}
+ }
+ if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceRules = append(m.ResourceRules, NamedRuleWithOperations{})
+ if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExcludeResourceRules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ExcludeResourceRules = append(m.ExcludeResourceRules, NamedRuleWithOperations{})
+ if err := m.ExcludeResourceRules[len(m.ExcludeResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := MatchPolicyType(dAtA[iNdEx:postIndex])
+ m.MatchPolicy = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Rules = append(m.Rules, v11.RuleWithOperations{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := FailurePolicyType(dAtA[iNdEx:postIndex])
+ m.FailurePolicy = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NamespaceSelector == nil {
+ m.NamespaceSelector = &v1.LabelSelector{}
+ }
+ if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := SideEffectClass(dAtA[iNdEx:postIndex])
+ m.SideEffects = &s
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TimeoutSeconds = &v
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := MatchPolicyType(dAtA[iNdEx:postIndex])
+ m.MatchPolicy = &s
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ReinvocationPolicyType(dAtA[iNdEx:postIndex])
+ m.ReinvocationPolicy = &s
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ObjectSelector == nil {
+ m.ObjectSelector = &v1.LabelSelector{}
+ }
+ if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchConditions = append(m.MatchConditions, MatchCondition{})
+ if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Webhooks = append(m.Webhooks, MutatingWebhook{})
+ if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, MutatingWebhookConfiguration{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NamedRuleWithOperations) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NamedRuleWithOperations: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NamedRuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RuleWithOperations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.RuleWithOperations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ParamKind) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ParamKind: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ParamKind: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.APIVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
- repeatedStringForWebhooks := "[]ValidatingWebhook{"
- for _, f := range this.Webhooks {
- repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + ","
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
}
- repeatedStringForWebhooks += "}"
- s := strings.Join([]string{`&ValidatingWebhookConfiguration{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Webhooks:` + repeatedStringForWebhooks + `,`,
- `}`,
- }, "")
- return s
+ return nil
}
-func (this *ValidatingWebhookConfigurationList) String() string {
- if this == nil {
- return "nil"
+func (m *ParamRef) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ParamRef: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ParamRef: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Selector == nil {
+ m.Selector = &v1.LabelSelector{}
+ }
+ if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ParameterNotFoundAction", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ParameterNotFoundActionType(dAtA[iNdEx:postIndex])
+ m.ParameterNotFoundAction = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
- repeatedStringForItems := "[]ValidatingWebhookConfiguration{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + ","
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
}
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
+ return nil
}
-func (this *WebhookClientConfig) String() string {
- if this == nil {
- return "nil"
+func (m *ServiceReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Namespace = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Path = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Port = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
- s := strings.Join([]string{`&WebhookClientConfig{`,
- `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`,
- `CABundle:` + valueToStringGenerated(this.CABundle) + `,`,
- `URL:` + valueToStringGenerated(this.URL) + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringGenerated(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
}
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
+ return nil
}
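The generated Unmarshal methods above all repeat the same protobuf wire-format idiom: read a base-128 varint, split it into a field number and wire type, then decode a length-delimited or fixed payload. As a reading aid only (this helper is not part of the generated file and assumes nothing beyond the standard library io and errors packages), the idiom boils down to:

func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift // each byte contributes its low 7 bits
		if c < 0x80 {                // high bit clear: this was the last byte
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits") // mirrors ErrIntOverflowGenerated
}

// A field tag is itself a varint: the field number sits in the high bits and the
// wire type in the low three, exactly as the generated code computes them.
//   tag, n, _ := decodeVarint(data)
//   fieldNum := int32(tag >> 3)
//   wireType := int(tag & 0x7) // 0 = varint, 2 = length-delimited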
-func (m *MatchCondition) Unmarshal(dAtA []byte) error {
+func (m *TypeChecking) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -1398,17 +5091,17 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: MatchCondition: wiretype end group for non-group")
+ return fmt.Errorf("proto: TypeChecking: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: MatchCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: TypeChecking: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpressionWarnings", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1418,55 +5111,25 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ m.ExpressionWarnings = append(m.ExpressionWarnings, ExpressionWarning{})
+ if err := m.ExpressionWarnings[len(m.ExpressionWarnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.Expression = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -1489,7 +5152,7 @@ func (m *MatchCondition) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
+func (m *ValidatingAdmissionPolicy) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -1512,17 +5175,17 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group")
+ return fmt.Errorf("proto: ValidatingAdmissionPolicy: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ValidatingAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1532,27 +5195,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1579,13 +5243,13 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1612,16 +5276,65 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Rules = append(m.Rules, v1.RuleWithOperations{})
- if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 4:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidatingAdmissionPolicyBinding) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyBinding: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1631,28 +5344,28 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := FailurePolicyType(dAtA[iNdEx:postIndex])
- m.FailurePolicy = &s
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 5:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1679,18 +5392,65 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NamespaceSelector == nil {
- m.NamespaceSelector = &v11.LabelSelector{}
- }
- if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 6:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidatingAdmissionPolicyBindingList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1700,50 +5460,30 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := SideEffectClass(dAtA[iNdEx:postIndex])
- m.SideEffects = &s
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.TimeoutSeconds = &v
- case 8:
+ iNdEx = postIndex
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1753,27 +5493,79 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
- if postIndex > l {
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, ValidatingAdmissionPolicyBinding{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidatingAdmissionPolicyBindingSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 9:
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field PolicyName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -1801,14 +5593,13 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := MatchPolicyType(dAtA[iNdEx:postIndex])
- m.MatchPolicy = &s
+ m.PolicyName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 10:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ParamRef", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1818,28 +5609,31 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := ReinvocationPolicyType(dAtA[iNdEx:postIndex])
- m.ReinvocationPolicy = &s
+ if m.ParamRef == nil {
+ m.ParamRef = &ParamRef{}
+ }
+ if err := m.ParamRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 11:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchResources", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1866,18 +5660,18 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ObjectSelector == nil {
- m.ObjectSelector = &v11.LabelSelector{}
+ if m.MatchResources == nil {
+ m.MatchResources = &MatchResources{}
}
- if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.MatchResources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 12:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ValidationActions", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1887,25 +5681,23 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.MatchConditions = append(m.MatchConditions, MatchCondition{})
- if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.ValidationActions = append(m.ValidationActions, ValidationAction(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -1928,7 +5720,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
+func (m *ValidatingAdmissionPolicyList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -1951,15 +5743,15 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1986,13 +5778,13 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2019,8 +5811,8 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Webhooks = append(m.Webhooks, MutatingWebhook{})
- if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Items = append(m.Items, ValidatingAdmissionPolicy{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -2045,7 +5837,7 @@ func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
+func (m *ValidatingAdmissionPolicySpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2068,15 +5860,15 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group")
+ return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ValidatingAdmissionPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ParamKind", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2103,13 +5895,187 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.ParamKind == nil {
+ m.ParamKind = &ParamKind{}
+ }
+ if err := m.ParamKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchConstraints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MatchConstraints == nil {
+ m.MatchConstraints = &MatchResources{}
+ }
+ if err := m.MatchConstraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Validations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Validations = append(m.Validations, Validation{})
+ if err := m.Validations[len(m.Validations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := FailurePolicyType(dAtA[iNdEx:postIndex])
+ m.FailurePolicy = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AuditAnnotations = append(m.AuditAnnotations, AuditAnnotation{})
+ if err := m.AuditAnnotations[len(m.AuditAnnotations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchConditions = append(m.MatchConditions, MatchCondition{})
+ if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Variables", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2136,8 +6102,8 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, MutatingWebhookConfiguration{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Variables = append(m.Variables, Variable{})
+ if err := m.Variables[len(m.Variables)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -2162,7 +6128,7 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ServiceReference) Unmarshal(dAtA []byte) error {
+func (m *ValidatingAdmissionPolicyStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2185,17 +6151,17 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group")
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ValidatingAdmissionPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
}
- var stringLen uint64
+ m.ObservedGeneration = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2205,29 +6171,16 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ m.ObservedGeneration |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Namespace = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field TypeChecking", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2237,29 +6190,33 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
+ if m.TypeChecking == nil {
+ m.TypeChecking = &TypeChecking{}
+ }
+ if err := m.TypeChecking.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2269,45 +6226,26 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.Path = &s
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.Port = &v
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -2452,7 +6390,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Rules = append(m.Rules, v1.RuleWithOperations{})
+ m.Rules = append(m.Rules, v11.RuleWithOperations{})
if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -2520,7 +6458,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
return io.ErrUnexpectedEOF
}
if m.NamespaceSelector == nil {
- m.NamespaceSelector = &v11.LabelSelector{}
+ m.NamespaceSelector = &v1.LabelSelector{}
}
if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
@@ -2563,27 +6501,212 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
}
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TimeoutSeconds = &v
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := MatchPolicyType(dAtA[iNdEx:postIndex])
+ m.MatchPolicy = &s
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ObjectSelector == nil {
+ m.ObjectSelector = &v1.LabelSelector{}
+ }
+ if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MatchConditions = append(m.MatchConditions, MatchCondition{})
+ if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
}
- m.TimeoutSeconds = &v
- case 8:
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2593,29 +6716,30 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex]))
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 9:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2625,28 +6749,79 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := MatchPolicyType(dAtA[iNdEx:postIndex])
- m.MatchPolicy = &s
+ m.Webhooks = append(m.Webhooks, ValidatingWebhook{})
+ if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 10:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2673,16 +6848,13 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ObjectSelector == nil {
- m.ObjectSelector = &v11.LabelSelector{}
- }
- if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 11:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field MatchConditions", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2709,8 +6881,8 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.MatchConditions = append(m.MatchConditions, MatchCondition{})
- if err := m.MatchConditions[len(m.MatchConditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Items = append(m.Items, ValidatingWebhookConfiguration{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -2735,7 +6907,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
+func (m *Validation) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2758,17 +6930,17 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group")
+ return fmt.Errorf("proto: Validation: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: Validation: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2778,30 +6950,29 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Expression = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2811,25 +6982,88 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Webhooks = append(m.Webhooks, ValidatingWebhook{})
- if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
}
+ s := k8s_io_apimachinery_pkg_apis_meta_v1.StatusReason(dAtA[iNdEx:postIndex])
+ m.Reason = &s
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MessageExpression", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MessageExpression = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -2852,7 +7086,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
+func (m *Variable) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2875,17 +7109,17 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group")
+ return fmt.Errorf("proto: Variable: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: Variable: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2895,30 +7129,29 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2928,25 +7161,23 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, ValidatingWebhookConfiguration{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Expression = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
index cfd7592854..1855cdfc4f 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
@@ -29,6 +29,56 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/admissionregistration/v1beta1";
+// AuditAnnotation describes how to produce an audit annotation for an API request.
+message AuditAnnotation {
+ // key specifies the audit annotation key. The audit annotation keys of
+ // a ValidatingAdmissionPolicy must be unique. The key must be a qualified
+ // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
+ //
+ // The key is combined with the resource name of the
+ // ValidatingAdmissionPolicy to construct an audit annotation key:
+ // "{ValidatingAdmissionPolicy name}/{key}".
+ //
+ // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
+ // and the same audit annotation key, the annotation key will be identical.
+ // In this case, the first annotation written with the key will be included
+ // in the audit event and all subsequent annotations with the same key
+ // will be discarded.
+ //
+ // Required.
+ optional string key = 1;
+
+ // valueExpression represents the expression which is evaluated by CEL to
+ // produce an audit annotation value. The expression must evaluate to either
+ // a string or null value. If the expression evaluates to a string, the
+ // audit annotation is included with the string value. If the expression
+  // evaluates to null or an empty string, the audit annotation will be omitted.
+ // The valueExpression may be no longer than 5kb in length.
+ // If the result of the valueExpression is more than 10kb in length, it
+ // will be truncated to 10kb.
+ //
+ // If multiple ValidatingAdmissionPolicyBinding resources match an
+ // API request, then the valueExpression will be evaluated for
+ // each binding. All unique values produced by the valueExpressions
+ // will be joined together in a comma-separated list.
+ //
+ // Required.
+ optional string valueExpression = 2;
+}
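As a reading aid for the contract above, the sketch below pulls the documented key construction ("{ValidatingAdmissionPolicy name}/{key}"), the omission of null/empty values, the 10kb truncation, and the comma-joining of unique values into one illustrative Go helper. The function name and shape are hypothetical, not part of the Kubernetes API or apiserver code; it assumes only the standard library strings package:

// Illustrative sketch of the documented AuditAnnotation semantics; not real apiserver code.
func publishAuditAnnotation(policyName, key string, valuesFromBindings []string) map[string]string {
	const maxValueLen = 10 * 1024 // documented truncation limit for a single valueExpression result
	seen := map[string]bool{}
	var unique []string
	for _, v := range valuesFromBindings {
		if v == "" { // null or empty results are omitted
			continue
		}
		if len(v) > maxValueLen {
			v = v[:maxValueLen]
		}
		if !seen[v] {
			seen[v] = true
			unique = append(unique, v)
		}
	}
	if len(unique) == 0 {
		return nil
	}
	// The audit annotation key is "{ValidatingAdmissionPolicy name}/{key}".
	return map[string]string{policyName + "/" + key: strings.Join(unique, ", ")}
}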
+
+// ExpressionWarning is warning information that targets a specific expression.
+message ExpressionWarning {
+  // The path to the field that refers to the expression.
+ // For example, the reference to the expression of the first item of
+ // validations is "spec.validations[0].expression"
+ optional string fieldRef = 2;
+
+ // The content of type checking information in a human-readable form.
+ // Each line of the warning contains the type that the expression is checked
+ // against, followed by the type check error from the compiler.
+ optional string warning = 3;
+}
+
// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
message MatchCondition {
// Name is an identifier for this match condition, used for strategic merging of MatchConditions,
@@ -58,6 +108,101 @@ message MatchCondition {
optional string expression = 2;
}
+// MatchResources decides whether to run the admission control policy on an object based
+// on whether it meets the match criteria.
+// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
+// +structType=atomic
+message MatchResources {
+ // NamespaceSelector decides whether to run the admission control policy on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the policy.
+ //
+  // For example, to run the policy on any objects whose namespace is not
+  // associated with "runlevel" of "0" or "1", you will set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the policy on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging",
+ // set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
+
+ // ObjectSelector decides whether to run the validation based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the cel validation, and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the policy is opt-in, because end
+ // users may skip the admission policy by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
+
+ // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
+ // The policy cares about an operation if it matches _any_ Rule.
+ // +listType=atomic
+ // +optional
+ repeated NamedRuleWithOperations resourceRules = 3;
+
+ // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
+ // The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
+ // +listType=atomic
+ // +optional
+ repeated NamedRuleWithOperations excludeResourceRules = 4;
+
+ // matchPolicy defines how the "MatchResources" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
+ //
+ // Defaults to "Equivalent"
+ // +optional
+ optional string matchPolicy = 7;
+}
+
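Editorial note, not part of the vendored file: a minimal Go sketch of how a MatchResources value could be populated with the v1beta1 types this patch vendors below. The namespace label and rule contents are hypothetical, and the rule reuses the admissionregistration v1 RuleWithOperations type that the ruleWithOperations field above references. Assumed imports: v1 "k8s.io/api/admissionregistration/v1", v1beta1 "k8s.io/api/admissionregistration/v1beta1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".

    equivalent := v1beta1.MatchPolicyType("Equivalent")
    match := v1beta1.MatchResources{
        // Only consider objects in namespaces labeled environment=prod (hypothetical label).
        NamespaceSelector: &metav1.LabelSelector{
            MatchLabels: map[string]string{"environment": "prod"},
        },
        // Match CREATE and UPDATE of apps/v1 Deployments (and equivalent API versions).
        ResourceRules: []v1beta1.NamedRuleWithOperations{{
            RuleWithOperations: v1.RuleWithOperations{
                Operations: []v1.OperationType{v1.Create, v1.Update},
                Rule: v1.Rule{
                    APIGroups:   []string{"apps"},
                    APIVersions: []string{"v1"},
                    Resources:   []string{"deployments"},
                },
            },
        }},
        MatchPolicy: &equivalent,
    }
    _ = match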
// MutatingWebhook describes an admission webhook and the resources and operations it applies to.
message MutatingWebhook {
// The name of the admission webhook.
@@ -219,7 +364,7 @@ message MutatingWebhook {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
- // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.
+ // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
@@ -255,6 +400,88 @@ message MutatingWebhookConfigurationList {
repeated MutatingWebhookConfiguration items = 2;
}
+// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
+// +structType=atomic
+message NamedRuleWithOperations {
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ // +listType=atomic
+ // +optional
+ repeated string resourceNames = 1;
+
+ // RuleWithOperations is a tuple of Operations and Resources.
+ optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
+}
+
+// ParamKind is a tuple of Group Kind and Version.
+// +structType=atomic
+message ParamKind {
+ // APIVersion is the API group version the resources belong to.
+ // In format of "group/version".
+ // Required.
+ optional string apiVersion = 1;
+
+ // Kind is the API kind the resources belong to.
+ // Required.
+ optional string kind = 2;
+}
+
+// ParamRef describes how to locate the params to be used as input to
+// expressions of rules applied by a policy binding.
+// +structType=atomic
+message ParamRef {
+ // name is the name of the resource being referenced.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ //
+ // A single parameter used for all admission requests can be configured
+ // by setting the `name` field, leaving `selector` blank, and setting namespace
+ // if `paramKind` is namespace-scoped.
+ optional string name = 1;
+
+ // namespace is the namespace of the referenced resource. Allows limiting
+ // the search for params to a specific namespace. Applies to both `name` and
+ // `selector` fields.
+ //
+ // A per-namespace parameter may be used by specifying a namespace-scoped
+ // `paramKind` in the policy and leaving this field empty.
+ //
+ // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
+ // field results in a configuration error.
+ //
+ // - If `paramKind` is namespace-scoped, the namespace of the object being
+ // evaluated for admission will be used when this field is left unset. Take
+ // care that if this is left empty the binding must not match any cluster-scoped
+ // resources, which will result in an error.
+ //
+ // +optional
+ optional string namespace = 2;
+
+ // selector can be used to match multiple param objects based on their labels.
+ // Supply selector: {} to match all resources of the ParamKind.
+ //
+ // If multiple params are found, they are all evaluated with the policy expressions
+ // and the results are ANDed together.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ //
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
+
+ // `parameterNotFoundAction` controls the behavior of the binding when the resource
+ // exists, and name or selector is valid, but there are no parameters
+ // matched by the binding. If the value is set to `Allow`, then the absence of
+ // matched parameters is treated as a successful validation by the binding.
+ // If set to `Deny`, then the absence of matched parameters is subject to the
+ // `failurePolicy` of the policy.
+ //
+ // Allowed values are `Allow` or `Deny`
+ //
+ // Required
+ optional string parameterNotFoundAction = 4;
+}
+
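For orientation only (not part of the vendored sources), a small Go sketch of a ParamRef that points a binding at one named parameter object and denies requests when it is missing; the resource and namespace names are hypothetical. Assumed import: v1beta1 "k8s.io/api/admissionregistration/v1beta1".

    deny := v1beta1.DenyAction
    paramRef := v1beta1.ParamRef{
        Name:                    "replica-limits-prod", // hypothetical parameter object
        Namespace:               "policy-params",       // only valid when paramKind is namespace-scoped
        ParameterNotFoundAction: &deny,
    }
    _ = paramRef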
// ServiceReference holds a reference to Service.legacy.k8s.io
message ServiceReference {
// `namespace` is the namespace of the service.
@@ -277,6 +504,248 @@ message ServiceReference {
optional int32 port = 4;
}
+// TypeChecking contains results of type checking the expressions in the
+// ValidatingAdmissionPolicy
+message TypeChecking {
+ // The type checking warnings for each expression.
+ // +optional
+ // +listType=atomic
+ repeated ExpressionWarning expressionWarnings = 1;
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.28
+// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
+message ValidatingAdmissionPolicy {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the ValidatingAdmissionPolicy.
+ optional ValidatingAdmissionPolicySpec spec = 2;
+
+ // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy
+ // behaves in the expected way.
+ // Populated by the system.
+ // Read-only.
+ // +optional
+ optional ValidatingAdmissionPolicyStatus status = 3;
+}
+
+// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
+// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+//
+// The CEL expressions of a policy must have a computed CEL cost below the maximum
+// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+message ValidatingAdmissionPolicyBinding {
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
+ optional ValidatingAdmissionPolicyBindingSpec spec = 2;
+}
+
+// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.
+message ValidatingAdmissionPolicyBindingList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of ValidatingAdmissionPolicyBinding.
+ repeated ValidatingAdmissionPolicyBinding items = 2;
+}
+
+// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
+message ValidatingAdmissionPolicyBindingSpec {
+ // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored
+ // Required.
+ optional string policyName = 1;
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ optional ParamRef paramRef = 2;
+
+ // MatchResources declares what resources match this binding and will be validated by it.
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
+ // If this is unset, all resources matched by the policy are validated by this binding
+ // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
+ // Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // +optional
+ optional MatchResources matchResources = 3;
+
+ // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
+ // If a validation evaluates to false it is always enforced according to these actions.
+ //
+ // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
+ // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
+ // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
+ //
+ // validationActions is declared as a set of action values. Order does
+ // not matter. validationActions may not contain duplicates of the same action.
+ //
+ // The supported actions values are:
+ //
+ // "Deny" specifies that a validation failure results in a denied request.
+ //
+ // "Warn" specifies that a validation failure is reported to the request client
+ // in HTTP Warning headers, with a warning code of 299. Warnings can be sent
+ // both for allowed or denied admission responses.
+ //
+ // "Audit" specifies that a validation failure is included in the published
+ // audit event for the request. The audit event will contain a
+ // `validation.policy.admission.k8s.io/validation_failure` audit annotation
+ // with a value containing the details of the validation failures, formatted as
+ // a JSON list of objects, each with the following fields:
+ // - message: The validation failure message string
+ // - policy: The resource name of the ValidatingAdmissionPolicy
+ // - binding: The resource name of the ValidatingAdmissionPolicyBinding
+ // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy
+ // - validationActions: The enforcement actions enacted for the validation failure
+ // Example audit annotation:
+ // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", \"policy\": \"policy.example.com\", \"binding\": \"policybinding.example.com\", \"expressionIndex\": \"1\", \"validationActions\": [\"Audit\"]}]"`
+ //
+ // Clients should expect to handle additional values by ignoring
+ // any values not recognized.
+ //
+ // "Deny" and "Warn" may not be used together since this combination
+ // needlessly duplicates the validation failure both in the
+ // API response body and the HTTP warning headers.
+ //
+ // Required.
+ // +listType=set
+ repeated string validationActions = 4;
+}
+
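A hedged Go sketch of a binding spec built from the vendored types; the policy name is hypothetical. Deny and Audit may be combined, while Deny and Warn may not, per the comment above. Assumed import: v1beta1 "k8s.io/api/admissionregistration/v1beta1".

    bindingSpec := v1beta1.ValidatingAdmissionPolicyBindingSpec{
        PolicyName: "replica-limits.example.com",
        ValidationActions: []v1beta1.ValidationAction{
            v1beta1.Deny,  // reject the request when a validation fails
            v1beta1.Audit, // and record the failure in the audit event
        },
    }
    _ = bindingSpec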
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.28
+// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.
+message ValidatingAdmissionPolicyList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of ValidatingAdmissionPolicy.
+ repeated ValidatingAdmissionPolicy items = 2;
+}
+
+// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
+message ValidatingAdmissionPolicySpec {
+ // ParamKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.
+ // +optional
+ optional ParamKind paramKind = 1;
+
+ // MatchConstraints specifies what resources this policy is designed to validate.
+ // The AdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
+ // Required.
+ optional MatchResources matchConstraints = 2;
+
+ // Validations contain CEL expressions which are used to apply the validation.
+ // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is
+ // required.
+ // +listType=atomic
+ // +optional
+ repeated Validation validations = 3;
+
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if spec.paramKind refers to a non-existent Kind.
+ // A binding is invalid if spec.paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions
+ // define how failures are enforced.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ // +optional
+ optional string failurePolicy = 4;
+
+ // auditAnnotations contains CEL expressions which are used to produce audit
+ // annotations for the audit event of the API request.
+ // validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is
+ // required.
+ // +listType=atomic
+ // +optional
+ repeated AuditAnnotation auditAnnotations = 5;
+
+ // MatchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ //
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ repeated MatchCondition matchConditions = 6;
+
+ // Variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except MatchConditions because MatchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, Variables must be sorted by the order of first appearance and acyclic.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ repeated Variable variables = 7;
+}
+
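A minimal, illustrative policy spec assembled from the vendored Go types; the ConfigMap-based parameters and the CEL expressions are hypothetical, and the MatchResources value is assumed to come from a sketch like the one shown earlier. Assumed import: v1beta1 "k8s.io/api/admissionregistration/v1beta1".

    var match v1beta1.MatchResources // e.g. built as in the MatchResources sketch above
    fail := v1beta1.FailurePolicyType("Fail")
    spec := v1beta1.ValidatingAdmissionPolicySpec{
        // Parameters are ConfigMaps selected by each binding's paramRef.
        ParamKind:        &v1beta1.ParamKind{APIVersion: "v1", Kind: "ConfigMap"},
        MatchConstraints: &match,
        Variables: []v1beta1.Variable{{
            Name:       "replicas",
            Expression: "object.spec.replicas",
        }},
        Validations: []v1beta1.Validation{{
            Expression: "variables.replicas <= int(params.data.maxReplicas)",
            Message:    "replica count exceeds the configured maximum",
        }},
        FailurePolicy: &fail,
    }
    _ = spec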
+// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.
+message ValidatingAdmissionPolicyStatus {
+ // The generation observed by the controller.
+ // +optional
+ optional int64 observedGeneration = 1;
+
+ // The results of type checking for each expression.
+ // Presence of this field indicates the completion of the type checking.
+ // +optional
+ optional TypeChecking typeChecking = 2;
+
+ // The conditions represent the latest available observations of a policy's current state.
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
+}
+
// ValidatingWebhook describes an admission webhook and the resources and operations it applies to.
message ValidatingWebhook {
// The name of the admission webhook.
@@ -420,7 +889,7 @@ message ValidatingWebhook {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
- // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.
+ // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
@@ -456,6 +925,97 @@ message ValidatingWebhookConfigurationList {
repeated ValidatingWebhookConfiguration items = 2;
}
+// Validation specifies the CEL expression which is used to apply the validation.
+message Validation {
+ // Expression represents the expression which will be evaluated by CEL.
+ // ref: https://github.com/google/cel-spec
+ // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Accessible property names are escaped according to the following rules when accessed in the expression:
+ // - '__' escapes to '__underscores__'
+ // - '.' escapes to '__dot__'
+ // - '-' escapes to '__dash__'
+ // - '/' escapes to '__slash__'
+ // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
+ // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
+ // "import", "let", "loop", "package", "namespace", "return".
+ // Examples:
+ // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
+ // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
+ // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
+ //
+ // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
+ // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
+ // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+ // non-intersecting elements in `Y` are appended, retaining their partial order.
+ // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+ // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+ // non-intersecting keys are appended, retaining their partial order.
+ // Required.
+ optional string Expression = 1;
+
+ // Message represents the message displayed when validation fails.
+ // If the Expression contains line breaks, Message is required.
+ // The message must not contain line breaks.
+ // If unset, the message is "failed Expression: {Expression}".
+ // e.g. "must be a URL with the host matching spec.host"
+ // +optional
+ optional string message = 2;
+
+ // Reason represents a machine-readable description of why this validation failed.
+ // If this is the first validation in the list to fail, this reason, as well as the
+ // corresponding HTTP response code, are used in the
+ // HTTP response to the client.
+ // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
+ // If not set, StatusReasonInvalid is used in the response to the client.
+ // +optional
+ optional string reason = 3;
+
+ // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
+ // Since messageExpression is used as a failure message, it must evaluate to a string.
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
+ // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
+ // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
+ // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
+ // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
+ // Example:
+ // "object.x must be less than max ("+string(params.max)+")"
+ // +optional
+ optional string messageExpression = 4;
+}
+
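An illustrative Validation value using the vendored types; the expression, messages, and host convention are hypothetical. Assumed imports: v1beta1 "k8s.io/api/admissionregistration/v1beta1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".

    forbidden := metav1.StatusReasonForbidden
    v := v1beta1.Validation{
        Expression:        "object.spec.host.endsWith('.example.com')",
        Message:           "host must be under example.com",
        MessageExpression: "'host ' + object.spec.host + ' is not under example.com'",
        Reason:            &forbidden,
    }
    _ = v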
+// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.
+// +structType=atomic
+message Variable {
+ // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
+ // The variable can be accessed in other expressions through `variables`
+ // For example, if name is "foo", the variable will be available as `variables.foo`
+ optional string Name = 1;
+
+ // Expression is the expression that will be evaluated as the value of the variable.
+ // The CEL expression has access to the same identifiers as the CEL expressions in Validation.
+ optional string Expression = 2;
+}
+
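A short sketch showing the ordering rule for variables: a later variable may refer to an earlier one through `variables`. The expressions are illustrative. Assumed import: v1beta1 "k8s.io/api/admissionregistration/v1beta1".

    vars := []v1beta1.Variable{
        {Name: "containers", Expression: "object.spec.template.spec.containers"},
        // "images" may refer to "containers" because it appears earlier in the list.
        {Name: "images", Expression: "variables.containers.map(c, c.image)"},
    }
    _ = vars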
// WebhookClientConfig contains the information to make a TLS
// connection with the webhook
message WebhookClientConfig {
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
index 098744cf63..363233a2f9 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
@@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ValidatingWebhookConfigurationList{},
&MutatingWebhookConfiguration{},
&MutatingWebhookConfigurationList{},
+ &ValidatingAdmissionPolicy{},
+ &ValidatingAdmissionPolicyList{},
+ &ValidatingAdmissionPolicyBinding{},
+ &ValidatingAdmissionPolicyBindingList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
index 82ee7df9ba..c199702fbd 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
@@ -38,6 +38,18 @@ const (
AllScopes ScopeType = v1.AllScopes
)
+// ParameterNotFoundActionType specifies a failure policy that defines how a binding
+// is evaluated when the param referred by its perNamespaceParamRef is not found.
+type ParameterNotFoundActionType string
+
+const (
+ // Allow means all requests will be admitted if no param resources
+ // could be found.
+ AllowAction ParameterNotFoundActionType = "Allow"
+ // Deny means all requests will be denied if no param resources are found.
+ DenyAction ParameterNotFoundActionType = "Deny"
+)
+
// FailurePolicyType specifies a failure policy that defines how unrecognized errors from the admission endpoint are handled.
type FailurePolicyType string
@@ -75,6 +87,584 @@ const (
SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun"
)
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.28
+// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
+type ValidatingAdmissionPolicy struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the desired behavior of the ValidatingAdmissionPolicy.
+ Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy
+ // behaves in the expected way.
+ // Populated by the system.
+ // Read-only.
+ // +optional
+ Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.
+type ValidatingAdmissionPolicyStatus struct {
+ // The generation observed by the controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
+ // The results of type checking for each expression.
+ // Presence of this field indicates the completion of the type checking.
+ // +optional
+ TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"`
+ // The conditions represent the latest available observations of a policy's current state.
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
+}
+
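As a usage sketch (editorial, not vendored code), type-checking results can be read back from the status once they are populated; the helper below is hypothetical. Assumed imports: "fmt", v1beta1 "k8s.io/api/admissionregistration/v1beta1".

    func printTypeCheckWarnings(policy *v1beta1.ValidatingAdmissionPolicy) {
        // TypeChecking is only present once type checking has completed.
        if policy.Status.TypeChecking == nil {
            return
        }
        for _, w := range policy.Status.TypeChecking.ExpressionWarnings {
            fmt.Printf("%s: %s\n", w.FieldRef, w.Warning)
        }
    }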
+// ValidatingAdmissionPolicyConditionType is the condition type of admission validation policy.
+type ValidatingAdmissionPolicyConditionType string
+
+// TypeChecking contains results of type checking the expressions in the
+// ValidatingAdmissionPolicy
+type TypeChecking struct {
+ // The type checking warnings for each expression.
+ // +optional
+ // +listType=atomic
+ ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"`
+}
+
+// ExpressionWarning is warning information that targets a specific expression.
+type ExpressionWarning struct {
+ // The path to the field that refers to the expression.
+ // For example, the reference to the expression of the first item of
+ // validations is "spec.validations[0].expression"
+ FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"`
+ // The content of type checking information in a human-readable form.
+ // Each line of the warning contains the type that the expression is checked
+ // against, followed by the type check error from the compiler.
+ Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.28
+// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.
+type ValidatingAdmissionPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of ValidatingAdmissionPolicy.
+ Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
+type ValidatingAdmissionPolicySpec struct {
+ // ParamKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.
+ // +optional
+ ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"`
+
+ // MatchConstraints specifies what resources this policy is designed to validate.
+ // The AdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
+ // Required.
+ MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"`
+
+ // Validations contain CEL expressions which are used to apply the validation.
+ // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is
+ // required.
+ // +listType=atomic
+ // +optional
+ Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"`
+
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if spec.paramKind refers to a non-existent Kind.
+ // A binding is invalid if spec.paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions
+ // define how failures are enforced.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ // +optional
+ FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"`
+
+ // auditAnnotations contains CEL expressions which are used to produce audit
+ // annotations for the audit event of the API request.
+ // validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is
+ // required.
+ // +listType=atomic
+ // +optional
+ AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"`
+
+ // MatchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ //
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`
+
+ // Variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except MatchConditions because MatchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, Variables must be sorted by the order of first appearance and acyclic.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=name
+ // +optional
+ Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"`
+}
+
+// ParamKind is a tuple of Group Kind and Version.
+// +structType=atomic
+type ParamKind struct {
+ // APIVersion is the API group version the resources belong to.
+ // In format of "group/version".
+ // Required.
+ APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,rep,name=apiVersion"`
+
+ // Kind is the API kind the resources belong to.
+ // Required.
+ Kind string `json:"kind,omitempty" protobuf:"bytes,2,rep,name=kind"`
+}
+
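Two illustrative ParamKind values, one for a built-in kind and one for a hypothetical CRD; both follow the "group/version" form noted above, with the core group written as just "v1". Assumed import: v1beta1 "k8s.io/api/admissionregistration/v1beta1".

    configMapParams := v1beta1.ParamKind{APIVersion: "v1", Kind: "ConfigMap"}
    crdParams := v1beta1.ParamKind{APIVersion: "rules.example.com/v1", Kind: "ReplicaLimit"}
    _, _ = configMapParams, crdParams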
+// Validation specifies the CEL expression which is used to apply the validation.
+type Validation struct {
+ // Expression represents the expression which will be evaluated by CEL.
+ // ref: https://github.com/google/cel-spec
+ // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Accessible property names are escaped according to the following rules when accessed in the expression:
+ // - '__' escapes to '__underscores__'
+ // - '.' escapes to '__dot__'
+ // - '-' escapes to '__dash__'
+ // - '/' escapes to '__slash__'
+ // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
+ // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
+ // "import", "let", "loop", "package", "namespace", "return".
+ // Examples:
+ // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
+ // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
+ // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
+ //
+ // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
+ // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
+ // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+ // non-intersecting elements in `Y` are appended, retaining their partial order.
+ // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+ // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+ // non-intersecting keys are appended, retaining their partial order.
+ // Required.
+ Expression string `json:"expression" protobuf:"bytes,1,opt,name=Expression"`
+ // Message represents the message displayed when validation fails.
+ // If the Expression contains line breaks, Message is required.
+ // The message must not contain line breaks.
+ // If unset, the message is "failed Expression: {Expression}".
+ // e.g. "must be a URL with the host matching spec.host"
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+ // Reason represents a machine-readable description of why this validation failed.
+ // If this is the first validation in the list to fail, this reason, as well as the
+ // corresponding HTTP response code, are used in the
+ // HTTP response to the client.
+ // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
+ // If not set, StatusReasonInvalid is used in the response to the client.
+ // +optional
+ Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+ // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
+ // Since messageExpression is used as a failure message, it must evaluate to a string.
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
+ // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
+ // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
+ // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
+ // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
+ // Example:
+ // "object.x must be less than max ("+string(params.max)+")"
+ // +optional
+ MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"`
+}
+
+// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.
+// +structType=atomic
+type Variable struct {
+ // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
+ // The variable can be accessed in other expressions through `variables`
+ // For example, if name is "foo", the variable will be available as `variables.foo`
+ Name string `json:"name" protobuf:"bytes,1,opt,name=Name"`
+
+ // Expression is the expression that will be evaluated as the value of the variable.
+ // The CEL expression has access to the same identifiers as the CEL expressions in Validation.
+ Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"`
+}
+
+// AuditAnnotation describes how to produce an audit annotation for an API request.
+type AuditAnnotation struct {
+ // key specifies the audit annotation key. The audit annotation keys of
+ // a ValidatingAdmissionPolicy must be unique. The key must be a qualified
+ // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
+ //
+ // The key is combined with the resource name of the
+ // ValidatingAdmissionPolicy to construct an audit annotation key:
+ // "{ValidatingAdmissionPolicy name}/{key}".
+ //
+ // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
+ // and the same audit annotation key, the annotation key will be identical.
+ // In this case, the first annotation written with the key will be included
+ // in the audit event and all subsequent annotations with the same key
+ // will be discarded.
+ //
+ // Required.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+
+ // valueExpression represents the expression which is evaluated by CEL to
+ // produce an audit annotation value. The expression must evaluate to either
+ // a string or null value. If the expression evaluates to a string, the
+ // audit annotation is included with the string value. If the expression
+ // evaluates to null or an empty string, the audit annotation will be omitted.
+ // The valueExpression may be no longer than 5kb in length.
+ // If the result of the valueExpression is more than 10kb in length, it
+ // will be truncated to 10kb.
+ //
+ // If multiple ValidatingAdmissionPolicyBinding resources match an
+ // API request, then the valueExpression will be evaluated for
+ // each binding. All unique values produced by the valueExpressions
+ // will be joined together in a comma-separated list.
+ //
+ // Required.
+ ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"`
+}
+
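An illustrative AuditAnnotation; the key and expression are hypothetical. With a policy named "replica-limits.example.com", the published annotation key would be "replica-limits.example.com/high-replica-count". Assumed import: v1beta1 "k8s.io/api/admissionregistration/v1beta1".

    annotation := v1beta1.AuditAnnotation{
        Key: "high-replica-count",
        // A valueExpression that returned null instead would omit the annotation entirely.
        ValueExpression: "'replicas=' + string(object.spec.replicas)",
    }
    _ = annotation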
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.28
+
+// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
+// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+//
+// The CEL expressions of a policy must have a computed CEL cost below the maximum
+// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
+type ValidatingAdmissionPolicyBinding struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
+ Spec ValidatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.28
+
+// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.
+type ValidatingAdmissionPolicyBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // List of ValidatingAdmissionPolicyBinding.
+ Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
+type ValidatingAdmissionPolicyBindingSpec struct {
+ // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored
+ // Required.
+ PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
+
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ // +optional
+ ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
+
+ // MatchResources declares what resources match this binding and will be validated by it.
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
+ // If this is unset, all resources matched by the policy are validated by this binding
+ // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
+ // Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // +optional
+ MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`
+
+ // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
+ // If a validation evaluates to false it is always enforced according to these actions.
+ //
+ // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
+ // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
+ // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
+ //
+ // validationActions is declared as a set of action values. Order does
+ // not matter. validationActions may not contain duplicates of the same action.
+ //
+ // The supported actions values are:
+ //
+ // "Deny" specifies that a validation failure results in a denied request.
+ //
+ // "Warn" specifies that a validation failure is reported to the request client
+ // in HTTP Warning headers, with a warning code of 299. Warnings can be sent
+ // both for allowed or denied admission responses.
+ //
+ // "Audit" specifies that a validation failure is included in the published
+ // audit event for the request. The audit event will contain a
+ // `validation.policy.admission.k8s.io/validation_failure` audit annotation
+ // with a value containing the details of the validation failures, formatted as
+ // a JSON list of objects, each with the following fields:
+ // - message: The validation failure message string
+ // - policy: The resource name of the ValidatingAdmissionPolicy
+ // - binding: The resource name of the ValidatingAdmissionPolicyBinding
+ // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy
+ // - validationActions: The enforcement actions enacted for the validation failure
+ // Example audit annotation:
+ // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", \"policy\": \"policy.example.com\", \"binding\": \"policybinding.example.com\", \"expressionIndex\": \"1\", \"validationActions\": [\"Audit\"]}]"`
+ //
+ // Clients should expect to handle additional values by ignoring
+ // any values not recognized.
+ //
+ // "Deny" and "Warn" may not be used together since this combination
+ // needlessly duplicates the validation failure both in the
+ // API response body and the HTTP warning headers.
+ //
+ // Required.
+ // +listType=set
+ ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"`
+}
+
+// ParamRef describes how to locate the params to be used as input to
+// expressions of rules applied by a policy binding.
+// +structType=atomic
+type ParamRef struct {
+ // name is the name of the resource being referenced.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ //
+ // A single parameter used for all admission requests can be configured
+ // by setting the `name` field, leaving `selector` blank, and setting namespace
+ // if `paramKind` is namespace-scoped.
+ //
+ Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"`
+
+ // namespace is the namespace of the referenced resource. Allows limiting
+ // the search for params to a specific namespace. Applies to both `name` and
+ // `selector` fields.
+ //
+ // A per-namespace parameter may be used by specifying a namespace-scoped
+ // `paramKind` in the policy and leaving this field empty.
+ //
+ // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
+ // field results in a configuration error.
+ //
+ // - If `paramKind` is namespace-scoped, the namespace of the object being
+ // evaluated for admission will be used when this field is left unset. Take
+ // care that if this is left empty the binding must not match any cluster-scoped
+ // resources, which will result in an error.
+ //
+ // +optional
+ Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"`
+
+ // selector can be used to match multiple param objects based on their labels.
+ // Supply selector: {} to match all resources of the ParamKind.
+ //
+ // If multiple params are found, they are all evaluated with the policy expressions
+ // and the results are ANDed together.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ //
+ // +optional
+ Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"`
+
+ // `parameterNotFoundAction` controls the behavior of the binding when the resource
+ // exists, and name or selector is valid, but there are no parameters
+ // matched by the binding. If the value is set to `Allow`, then the absence of
+ // matched parameters is treated as a successful validation by the binding.
+ // If set to `Deny`, then the absence of matched parameters is subject to the
+ // `failurePolicy` of the policy.
+ //
+ // Allowed values are `Allow` or `Deny`
+ //
+ // Required
+ ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"`
+}
+
+// MatchResources decides whether to run the admission control policy on an object based
+// on whether it meets the match criteria.
+// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
+// +structType=atomic
+type MatchResources struct {
+ // NamespaceSelector decides whether to run the admission control policy on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the policy.
+ //
+ // For example, to run the policy on any objects whose namespace is not
+ // associated with "runlevel" of "0" or "1", set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the policy on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging",
+ // set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ // +optional
+ NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"`
+ // ObjectSelector decides whether to run the validation based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the cel validation, and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the webhook is opt-in, because end
+ // users may skip the admission webhook by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ // +optional
+ ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"`
+ // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
+ // The policy cares about an operation if it matches _any_ Rule.
+ // +listType=atomic
+ // +optional
+ ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"`
+ // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
+ // The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
+ // +listType=atomic
+ // +optional
+ ExcludeResourceRules []NamedRuleWithOperations `json:"excludeResourceRules,omitempty" protobuf:"bytes,4,rep,name=excludeResourceRules"`
+ // matchPolicy defines how the "MatchResources" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
+ //
+ // Defaults to "Equivalent"
+ // +optional
+ MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"`
+}
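As an illustrative sketch (again not part of the vendored source), the "environment in prod/staging" namespaceSelector example from the comment above could be built programmatically like this:

package example

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// prodStagingMatch runs the policy only on objects whose namespace carries
// environment=prod or environment=staging.
func prodStagingMatch() *admissionregistrationv1beta1.MatchResources {
	return &admissionregistrationv1beta1.MatchResources{
		NamespaceSelector: &metav1.LabelSelector{
			MatchExpressions: []metav1.LabelSelectorRequirement{{
				Key:      "environment",
				Operator: metav1.LabelSelectorOpIn,
				Values:   []string{"prod", "staging"},
			}},
		},
	}
}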
+
+// ValidationAction specifies a policy enforcement action.
+// +enum
+type ValidationAction string
+
+const (
+ // Deny specifies that a validation failure results in a denied request.
+ Deny ValidationAction = "Deny"
+ // Warn specifies that a validation failure is reported to the request client
+ // in HTTP Warning headers, with a warning code of 299. Warnings can be sent
+ // for both allowed and denied admission responses.
+ Warn ValidationAction = "Warn"
+ // Audit specifies that a validation failure is included in the published
+ // audit event for the request. The audit event will contain a
+ // `validation.policy.admission.k8s.io/validation_failure` audit annotation
+ // with a value containing the details of the validation failure.
+ Audit ValidationAction = "Audit"
+)
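A hypothetical binding's validationActions slice could combine these constants; per the generated field documentation further down, "Deny" and "Warn" may not be used together:

package example

import admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"

// exampleActions denies the request and also records the failure in the
// audit event; combining Deny with Warn is rejected by validation.
var exampleActions = []admissionregistrationv1beta1.ValidationAction{
	admissionregistrationv1beta1.Deny,
	admissionregistrationv1beta1.Audit,
}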
+
+// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
+// +structType=atomic
+type NamedRuleWithOperations struct {
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ // +listType=atomic
+ // +optional
+ ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,1,rep,name=resourceNames"`
+ // RuleWithOperations is a tuple of Operations and Resources.
+ RuleWithOperations `json:",inline" protobuf:"bytes,2,opt,name=ruleWithOperations"`
+}
+
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -296,7 +886,7 @@ type ValidatingWebhook struct {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
- // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.
+ // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
@@ -468,7 +1058,7 @@ type MutatingWebhook struct {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
- // This is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.
+ // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
index 2c0a9f0117..adaf4bc11d 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
@@ -27,6 +27,26 @@ package v1beta1
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AuditAnnotation = map[string]string{
+ "": "AuditAnnotation describes how to produce an audit annotation for an API request.",
+ "key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
+ "valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.",
+}
+
+func (AuditAnnotation) SwaggerDoc() map[string]string {
+ return map_AuditAnnotation
+}
+
+var map_ExpressionWarning = map[string]string{
+ "": "ExpressionWarning is a warning information that targets a specific expression.",
+ "fieldRef": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"",
+ "warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.",
+}
+
+func (ExpressionWarning) SwaggerDoc() map[string]string {
+ return map_ExpressionWarning
+}
+
var map_MatchCondition = map[string]string{
"": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.",
"name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.",
@@ -37,6 +57,19 @@ func (MatchCondition) SwaggerDoc() map[string]string {
return map_MatchCondition
}
+var map_MatchResources = map[string]string{
+ "": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
+ "namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
+ "objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
+ "resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.",
+ "excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
+ "matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"",
+}
+
+func (MatchResources) SwaggerDoc() map[string]string {
+ return map_MatchResources
+}
+
var map_MutatingWebhook = map[string]string{
"": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.",
"name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
@@ -50,7 +83,7 @@ var map_MutatingWebhook = map[string]string{
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.",
"reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".",
- "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.",
+ "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.",
}
func (MutatingWebhook) SwaggerDoc() map[string]string {
@@ -77,6 +110,37 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string {
return map_MutatingWebhookConfigurationList
}
+var map_NamedRuleWithOperations = map[string]string{
+ "": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.",
+ "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
+}
+
+func (NamedRuleWithOperations) SwaggerDoc() map[string]string {
+ return map_NamedRuleWithOperations
+}
+
+var map_ParamKind = map[string]string{
+ "": "ParamKind is a tuple of Group Kind and Version.",
+ "apiVersion": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.",
+ "kind": "Kind is the API kind the resources belong to. Required.",
+}
+
+func (ParamKind) SwaggerDoc() map[string]string {
+ return map_ParamKind
+}
+
+var map_ParamRef = map[string]string{
+ "": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.",
+ "name": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.",
+ "namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.",
+ "selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.",
+ "parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired",
+}
+
+func (ParamRef) SwaggerDoc() map[string]string {
+ return map_ParamRef
+}
+
var map_ServiceReference = map[string]string{
"": "ServiceReference holds a reference to Service.legacy.k8s.io",
"namespace": "`namespace` is the namespace of the service. Required",
@@ -89,6 +153,94 @@ func (ServiceReference) SwaggerDoc() map[string]string {
return map_ServiceReference
}
+var map_TypeChecking = map[string]string{
+ "": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy",
+ "expressionWarnings": "The type checking warnings for each expression.",
+}
+
+func (TypeChecking) SwaggerDoc() map[string]string {
+ return map_TypeChecking
+}
+
+var map_ValidatingAdmissionPolicy = map[string]string{
+ "": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+ "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.",
+ "status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.",
+}
+
+func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string {
+ return map_ValidatingAdmissionPolicy
+}
+
+var map_ValidatingAdmissionPolicyBinding = map[string]string{
+ "": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
+ "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
+ "spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.",
+}
+
+func (ValidatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
+ return map_ValidatingAdmissionPolicyBinding
+}
+
+var map_ValidatingAdmissionPolicyBindingList = map[string]string{
+ "": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of PolicyBinding.",
+}
+
+func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
+ return map_ValidatingAdmissionPolicyBindingList
+}
+
+var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{
+ "": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.",
+ "policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
+ "paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
+ "matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.",
+ "validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.",
+}
+
+func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
+ return map_ValidatingAdmissionPolicyBindingSpec
+}
+
+var map_ValidatingAdmissionPolicyList = map[string]string{
+ "": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "items": "List of ValidatingAdmissionPolicy.",
+}
+
+func (ValidatingAdmissionPolicyList) SwaggerDoc() map[string]string {
+ return map_ValidatingAdmissionPolicyList
+}
+
+var map_ValidatingAdmissionPolicySpec = map[string]string{
+ "": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.",
+ "paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.",
+ "matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.",
+ "validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.",
+ "failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
+ "auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.",
+ "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped",
+ "variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.",
+}
+
+func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
+ return map_ValidatingAdmissionPolicySpec
+}
+
+var map_ValidatingAdmissionPolicyStatus = map[string]string{
+ "": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.",
+ "observedGeneration": "The generation observed by the controller.",
+ "typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.",
+ "conditions": "The conditions represent the latest available observations of a policy's current state.",
+}
+
+func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string {
+ return map_ValidatingAdmissionPolicyStatus
+}
+
var map_ValidatingWebhook = map[string]string{
"": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.",
"name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
@@ -101,7 +253,7 @@ var map_ValidatingWebhook = map[string]string{
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.",
- "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is an alpha feature and managed by the AdmissionWebhookMatchConditions feature gate.",
+ "matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.",
}
func (ValidatingWebhook) SwaggerDoc() map[string]string {
@@ -128,6 +280,28 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string {
return map_ValidatingWebhookConfigurationList
}
+var map_Validation = map[string]string{
+ "": "Validation specifies the CEL expression which is used to apply the validation.",
+ "expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.",
+ "message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".",
+ "reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.",
+ "messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"",
+}
+
+func (Validation) SwaggerDoc() map[string]string {
+ return map_Validation
+}
+
+var map_Variable = map[string]string{
+ "": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.",
+ "name": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`",
+ "expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.",
+}
+
+func (Variable) SwaggerDoc() map[string]string {
+ return map_Variable
+}
+
var map_WebhookClientConfig = map[string]string{
"": "WebhookClientConfig contains the information to make a TLS connection with the webhook",
"url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
index 9c5299bdfa..4c10b1d113 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
@@ -22,11 +22,43 @@ limitations under the License.
package v1beta1
import (
- v1 "k8s.io/api/admissionregistration/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation.
+func (in *AuditAnnotation) DeepCopy() *AuditAnnotation {
+ if in == nil {
+ return nil
+ }
+ out := new(AuditAnnotation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning.
+func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
+ if in == nil {
+ return nil
+ }
+ out := new(ExpressionWarning)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
*out = *in
@@ -43,13 +75,58 @@ func (in *MatchCondition) DeepCopy() *MatchCondition {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MatchResources) DeepCopyInto(out *MatchResources) {
+ *out = *in
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ObjectSelector != nil {
+ in, out := &in.ObjectSelector, &out.ObjectSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ResourceRules != nil {
+ in, out := &in.ResourceRules, &out.ResourceRules
+ *out = make([]NamedRuleWithOperations, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ExcludeResourceRules != nil {
+ in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
+ *out = make([]NamedRuleWithOperations, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.MatchPolicy != nil {
+ in, out := &in.MatchPolicy, &out.MatchPolicy
+ *out = new(MatchPolicyType)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources.
+func (in *MatchResources) DeepCopy() *MatchResources {
+ if in == nil {
+ return nil
+ }
+ out := new(MatchResources)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
*out = *in
in.ClientConfig.DeepCopyInto(&out.ClientConfig)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
- *out = make([]v1.RuleWithOperations, len(*in))
+ *out = make([]admissionregistrationv1.RuleWithOperations, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -66,12 +143,12 @@ func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
}
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
- *out = new(metav1.LabelSelector)
+ *out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ObjectSelector != nil {
in, out := &in.ObjectSelector, &out.ObjectSelector
- *out = new(metav1.LabelSelector)
+ *out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.SideEffects != nil {
@@ -178,6 +255,70 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
+ *out = *in
+ if in.ResourceNames != nil {
+ in, out := &in.ResourceNames, &out.ResourceNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations.
+func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations {
+ if in == nil {
+ return nil
+ }
+ out := new(NamedRuleWithOperations)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParamKind) DeepCopyInto(out *ParamKind) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind.
+func (in *ParamKind) DeepCopy() *ParamKind {
+ if in == nil {
+ return nil
+ }
+ out := new(ParamKind)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParamRef) DeepCopyInto(out *ParamRef) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ParameterNotFoundAction != nil {
+ in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction
+ *out = new(ParameterNotFoundActionType)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef.
+func (in *ParamRef) DeepCopy() *ParamRef {
+ if in == nil {
+ return nil
+ }
+ out := new(ParamRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
*out = *in
@@ -204,13 +345,267 @@ func (in *ServiceReference) DeepCopy() *ServiceReference {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TypeChecking) DeepCopyInto(out *TypeChecking) {
+ *out = *in
+ if in.ExpressionWarnings != nil {
+ in, out := &in.ExpressionWarnings, &out.ExpressionWarnings
+ *out = make([]ExpressionWarning, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking.
+func (in *TypeChecking) DeepCopy() *TypeChecking {
+ if in == nil {
+ return nil
+ }
+ out := new(TypeChecking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy.
+func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(ValidatingAdmissionPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding.
+func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(ValidatingAdmissionPolicyBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ValidatingAdmissionPolicyBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList.
+func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(ValidatingAdmissionPolicyBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) {
+ *out = *in
+ if in.ParamRef != nil {
+ in, out := &in.ParamRef, &out.ParamRef
+ *out = new(ParamRef)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.MatchResources != nil {
+ in, out := &in.MatchResources, &out.MatchResources
+ *out = new(MatchResources)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ValidationActions != nil {
+ in, out := &in.ValidationActions, &out.ValidationActions
+ *out = make([]ValidationAction, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec.
+func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ValidatingAdmissionPolicyBindingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ValidatingAdmissionPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList.
+func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(ValidatingAdmissionPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) {
+ *out = *in
+ if in.ParamKind != nil {
+ in, out := &in.ParamKind, &out.ParamKind
+ *out = new(ParamKind)
+ **out = **in
+ }
+ if in.MatchConstraints != nil {
+ in, out := &in.MatchConstraints, &out.MatchConstraints
+ *out = new(MatchResources)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Validations != nil {
+ in, out := &in.Validations, &out.Validations
+ *out = make([]Validation, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FailurePolicy != nil {
+ in, out := &in.FailurePolicy, &out.FailurePolicy
+ *out = new(FailurePolicyType)
+ **out = **in
+ }
+ if in.AuditAnnotations != nil {
+ in, out := &in.AuditAnnotations, &out.AuditAnnotations
+ *out = make([]AuditAnnotation, len(*in))
+ copy(*out, *in)
+ }
+ if in.MatchConditions != nil {
+ in, out := &in.MatchConditions, &out.MatchConditions
+ *out = make([]MatchCondition, len(*in))
+ copy(*out, *in)
+ }
+ if in.Variables != nil {
+ in, out := &in.Variables, &out.Variables
+ *out = make([]Variable, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec.
+func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ValidatingAdmissionPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) {
+ *out = *in
+ if in.TypeChecking != nil {
+ in, out := &in.TypeChecking, &out.TypeChecking
+ *out = new(TypeChecking)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus.
+func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ValidatingAdmissionPolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) {
*out = *in
in.ClientConfig.DeepCopyInto(&out.ClientConfig)
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
- *out = make([]v1.RuleWithOperations, len(*in))
+ *out = make([]admissionregistrationv1.RuleWithOperations, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -227,12 +622,12 @@ func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) {
}
if in.NamespaceSelector != nil {
in, out := &in.NamespaceSelector, &out.NamespaceSelector
- *out = new(metav1.LabelSelector)
+ *out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ObjectSelector != nil {
in, out := &in.ObjectSelector, &out.ObjectSelector
- *out = new(metav1.LabelSelector)
+ *out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.SideEffects != nil {
@@ -334,6 +729,43 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Validation) DeepCopyInto(out *Validation) {
+ *out = *in
+ if in.Reason != nil {
+ in, out := &in.Reason, &out.Reason
+ *out = new(v1.StatusReason)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation.
+func (in *Validation) DeepCopy() *Validation {
+ if in == nil {
+ return nil
+ }
+ out := new(Validation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Variable) DeepCopyInto(out *Variable) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable.
+func (in *Variable) DeepCopy() *Variable {
+ if in == nil {
+ return nil
+ }
+ out := new(Variable)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
*out = *in
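
For context on how these generated helpers are consumed, a minimal sketch follows (it is not part of the vendored diff): objects read from a shared informer cache must not be mutated in place, so callers clone them with the DeepCopy methods generated above before making changes. The object name and label below are hypothetical.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// Pretend this object came from a shared informer cache; it must not be mutated in place.
	cached := &admissionregistrationv1beta1.ValidatingAdmissionPolicy{}
	cached.Name = "disallow-host-path"

	// DeepCopy (generated above) returns an independent copy that is safe to modify.
	updated := cached.DeepCopy()
	updated.Labels = map[string]string{"owner": "e2e-test"}

	fmt.Println(cached.Labels == nil, len(updated.Labels)) // true 1
}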
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
index 09a92f4768..c1be5122a8 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -73,6 +73,78 @@ func (in *MutatingWebhookConfigurationList) APILifecycleRemoved() (major, minor
return 1, 22
}
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
+ return 1, 28
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ValidatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ValidatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
+ return 1, 28
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ValidatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ValidatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 28
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ValidatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
+ return 1, 34
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 28
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *ValidatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 31
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *ValidatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
+ return 1, 34
+}
+
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {
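
A minimal sketch of how the APILifecycle* accessors generated above can be consumed, for example by a test suite deciding whether the v1beta1 API is still served on a given cluster version; the servedAt helper below is an assumption, not part of the vendored code.

package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// servedAt reports whether an API introduced at (introMajor, introMinor) and
// removed at (rmMajor, rmMinor) is still served on the given cluster version.
func servedAt(clusterMajor, clusterMinor, introMajor, introMinor, rmMajor, rmMinor int) bool {
	atLeastIntroduced := clusterMajor > introMajor || (clusterMajor == introMajor && clusterMinor >= introMinor)
	beforeRemoved := clusterMajor < rmMajor || (clusterMajor == rmMajor && clusterMinor < rmMinor)
	return atLeastIntroduced && beforeRemoved
}

func main() {
	p := &admissionregistrationv1beta1.ValidatingAdmissionPolicy{}
	im, in := p.APILifecycleIntroduced() // 1, 28 per the generated code above
	rm, rn := p.APILifecycleRemoved()    // 1, 34 per the generated code above
	fmt.Println(servedAt(1, 30, im, in, rm, rn)) // true: v1beta1 is still served on 1.30
}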
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go
index 4effbc6c17..6871da414c 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go
@@ -225,55 +225,57 @@ func init() {
}
var fileDescriptor_a3903ff5e3cc7a03 = []byte{
- // 768 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xdf, 0x4e, 0x13, 0x4d,
- 0x14, 0xef, 0xd2, 0x52, 0x60, 0xfa, 0x7d, 0xf4, 0x63, 0x3e, 0x08, 0xb5, 0x26, 0x5b, 0x6c, 0x82,
- 0x41, 0x8d, 0xbb, 0xd2, 0x88, 0x91, 0x98, 0x68, 0x58, 0x20, 0x06, 0x05, 0x31, 0x03, 0xf1, 0x02,
- 0xbd, 0x70, 0xba, 0x3b, 0x6e, 0xd7, 0x76, 0x77, 0x36, 0x3b, 0xd3, 0x26, 0xdc, 0x18, 0x1f, 0xc1,
- 0x07, 0xf1, 0xd2, 0x87, 0xe0, 0xca, 0x70, 0x63, 0x42, 0x62, 0xd2, 0xc8, 0xfa, 0x16, 0x5c, 0x99,
- 0x99, 0xdd, 0xb6, 0x6c, 0xbb, 0xc4, 0x86, 0x8b, 0x26, 0x9d, 0x73, 0xce, 0xef, 0x77, 0xfe, 0xcc,
- 0x6f, 0xce, 0x82, 0x57, 0xcd, 0xc7, 0x4c, 0x73, 0xa8, 0xde, 0x6c, 0xd7, 0x49, 0xe0, 0x11, 0x4e,
- 0x98, 0xde, 0x21, 0x9e, 0x45, 0x03, 0x3d, 0x76, 0x60, 0xdf, 0x11, 0x3f, 0x46, 0x82, 0x0e, 0x09,
- 0x1c, 0x8f, 0x93, 0xc0, 0xc3, 0x2d, 0xbd, 0xb3, 0x8a, 0x5b, 0x7e, 0x03, 0xaf, 0xea, 0x36, 0xf1,
- 0x48, 0x80, 0x39, 0xb1, 0x34, 0x3f, 0xa0, 0x9c, 0xc2, 0xe5, 0x08, 0xa6, 0x61, 0xdf, 0xd1, 0x46,
- 0x60, 0x5a, 0x0f, 0x56, 0xbe, 0x6f, 0x3b, 0xbc, 0xd1, 0xae, 0x6b, 0x26, 0x75, 0x75, 0x9b, 0xda,
- 0x54, 0x97, 0xe8, 0x7a, 0xfb, 0x83, 0x3c, 0xc9, 0x83, 0xfc, 0x17, 0xb1, 0x96, 0x1f, 0x0e, 0x8a,
- 0x71, 0xb1, 0xd9, 0x70, 0x3c, 0x12, 0x1c, 0xeb, 0x7e, 0xd3, 0x96, 0x95, 0xe9, 0x2e, 0xe1, 0x58,
- 0xef, 0x8c, 0xd4, 0x52, 0xd6, 0xaf, 0x42, 0x05, 0x6d, 0x8f, 0x3b, 0x2e, 0x19, 0x01, 0x3c, 0xfa,
- 0x1b, 0x80, 0x99, 0x0d, 0xe2, 0xe2, 0x61, 0x5c, 0xf5, 0x87, 0x02, 0xe6, 0x0f, 0x64, 0xa7, 0x07,
- 0x9c, 0x06, 0xd8, 0x26, 0x6f, 0x48, 0xc0, 0x1c, 0xea, 0xc1, 0x35, 0x50, 0xc0, 0xbe, 0x13, 0xb9,
- 0x76, 0xb6, 0x4a, 0xca, 0x92, 0xb2, 0x32, 0x63, 0xfc, 0x7f, 0xd2, 0xad, 0x64, 0xc2, 0x6e, 0xa5,
- 0xb0, 0xf1, 0x7a, 0xa7, 0xe7, 0x42, 0x97, 0xe3, 0xe0, 0x06, 0x28, 0x12, 0xcf, 0xa4, 0x96, 0xe3,
- 0xd9, 0x31, 0x53, 0x69, 0x42, 0x42, 0x17, 0x63, 0x68, 0x71, 0x3b, 0xe9, 0x46, 0xc3, 0xf1, 0x70,
- 0x13, 0xcc, 0x59, 0xc4, 0xa4, 0x16, 0xae, 0xb7, 0x7a, 0xd5, 0xb0, 0x52, 0x76, 0x29, 0xbb, 0x32,
- 0x63, 0x2c, 0x84, 0xdd, 0xca, 0xdc, 0xd6, 0xb0, 0x13, 0x8d, 0xc6, 0x57, 0xbf, 0x4d, 0x80, 0xd9,
- 0xa1, 0x8e, 0xde, 0x83, 0x69, 0x31, 0x6e, 0x0b, 0x73, 0x2c, 0xdb, 0x29, 0xd4, 0x1e, 0x68, 0x83,
- 0x2b, 0xef, 0x4f, 0x4d, 0xf3, 0x9b, 0xb6, 0xbc, 0x7f, 0x4d, 0x44, 0x6b, 0x9d, 0x55, 0x6d, 0xbf,
- 0xfe, 0x91, 0x98, 0x7c, 0x8f, 0x70, 0x6c, 0xc0, 0xb8, 0x0b, 0x30, 0xb0, 0xa1, 0x3e, 0x2b, 0x7c,
- 0x0b, 0x72, 0xcc, 0x27, 0xa6, 0xec, 0xb8, 0x50, 0x5b, 0xd7, 0xc6, 0x12, 0x94, 0x96, 0x2c, 0xf3,
- 0xc0, 0x27, 0xa6, 0xf1, 0x4f, 0x9c, 0x26, 0x27, 0x4e, 0x48, 0x92, 0x42, 0x13, 0xe4, 0x19, 0xc7,
- 0xbc, 0x2d, 0x66, 0x21, 0xe8, 0x9f, 0x5c, 0x8f, 0x5e, 0x52, 0x18, 0xb3, 0x71, 0x82, 0x7c, 0x74,
- 0x46, 0x31, 0x75, 0xf5, 0x6b, 0x16, 0x2c, 0x26, 0x01, 0x9b, 0xd4, 0xb3, 0x1c, 0x2e, 0xe6, 0xf7,
- 0x0c, 0xe4, 0xf8, 0xb1, 0x4f, 0x62, 0x29, 0xdc, 0xeb, 0x95, 0x78, 0x78, 0xec, 0x93, 0x8b, 0x6e,
- 0xe5, 0xe6, 0x15, 0x30, 0xe1, 0x46, 0x12, 0x08, 0xd7, 0xfb, 0x1d, 0x44, 0x92, 0xb8, 0x95, 0x2c,
- 0xe2, 0xa2, 0x5b, 0x29, 0xf6, 0x61, 0xc9, 0xba, 0xe0, 0x0b, 0x00, 0x69, 0x5d, 0x76, 0x68, 0x3d,
- 0x8f, 0x14, 0x2c, 0x94, 0x25, 0x06, 0x91, 0x35, 0xca, 0x31, 0x0d, 0xdc, 0x1f, 0x89, 0x40, 0x29,
- 0x28, 0xd8, 0x01, 0xb0, 0x85, 0x19, 0x3f, 0x0c, 0xb0, 0xc7, 0xa2, 0x12, 0x1d, 0x97, 0x94, 0x72,
- 0x72, 0xa8, 0x77, 0xc7, 0x53, 0x84, 0x40, 0x0c, 0xf2, 0xee, 0x8e, 0xb0, 0xa1, 0x94, 0x0c, 0xf0,
- 0x36, 0xc8, 0x07, 0x04, 0x33, 0xea, 0x95, 0x26, 0x65, 0xfb, 0xfd, 0x3b, 0x40, 0xd2, 0x8a, 0x62,
- 0x2f, 0xbc, 0x03, 0xa6, 0x5c, 0xc2, 0x18, 0xb6, 0x49, 0x29, 0x2f, 0x03, 0x8b, 0x71, 0xe0, 0xd4,
- 0x5e, 0x64, 0x46, 0x3d, 0x7f, 0xf5, 0xbb, 0x02, 0x60, 0x72, 0xee, 0xbb, 0x0e, 0xe3, 0xf0, 0xdd,
- 0x88, 0xd2, 0xb5, 0xf1, 0xfa, 0x12, 0x68, 0xa9, 0xf3, 0xff, 0xe2, 0x94, 0xd3, 0x3d, 0xcb, 0x25,
- 0x95, 0x1f, 0x81, 0x49, 0x87, 0x13, 0x57, 0xdc, 0x62, 0x76, 0xa5, 0x50, 0x5b, 0xbb, 0x96, 0x0e,
- 0x8d, 0x7f, 0xe3, 0x0c, 0x93, 0x3b, 0x82, 0x0b, 0x45, 0x94, 0xd5, 0xf9, 0xe1, 0x7e, 0xc4, 0x03,
- 0xa8, 0xfe, 0x9c, 0x00, 0xf3, 0x69, 0x32, 0x86, 0x9f, 0x40, 0x91, 0x25, 0xec, 0xac, 0xa4, 0xc8,
- 0xa2, 0xc6, 0x7e, 0x1c, 0x29, 0xab, 0x6f, 0xb0, 0xaa, 0x92, 0x76, 0x86, 0x86, 0x93, 0xc1, 0x7d,
- 0xb0, 0x60, 0x52, 0xd7, 0xa5, 0xde, 0x76, 0xea, 0xce, 0xbb, 0x11, 0x76, 0x2b, 0x0b, 0x9b, 0x69,
- 0x01, 0x28, 0x1d, 0x07, 0x03, 0x00, 0xcc, 0xde, 0x13, 0x88, 0x96, 0x5e, 0xa1, 0xf6, 0xf4, 0x5a,
- 0x03, 0xee, 0xbf, 0xa4, 0xc1, 0xce, 0xea, 0x9b, 0x18, 0xba, 0x94, 0xc5, 0x78, 0x79, 0x72, 0xae,
- 0x66, 0x4e, 0xcf, 0xd5, 0xcc, 0xd9, 0xb9, 0x9a, 0xf9, 0x1c, 0xaa, 0xca, 0x49, 0xa8, 0x2a, 0xa7,
- 0xa1, 0xaa, 0x9c, 0x85, 0xaa, 0xf2, 0x2b, 0x54, 0x95, 0x2f, 0xbf, 0xd5, 0xcc, 0xd1, 0xf2, 0x58,
- 0x1f, 0xd5, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa0, 0xd0, 0x65, 0xbc, 0x95, 0x07, 0x00, 0x00,
+ // 790 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x4f, 0xdb, 0x48,
+ 0x14, 0x8e, 0x49, 0x08, 0x30, 0xd9, 0x4d, 0x96, 0x59, 0x10, 0xd9, 0xac, 0xe4, 0xb0, 0x91, 0x58,
+ 0xb1, 0xbb, 0x5a, 0x7b, 0x89, 0x96, 0xaa, 0xb4, 0x52, 0x2b, 0x0c, 0xa8, 0xa2, 0x85, 0x52, 0x4d,
+ 0x50, 0x0f, 0xb4, 0x87, 0x4e, 0xec, 0xa9, 0xe3, 0x26, 0xf6, 0x58, 0x9e, 0x49, 0x24, 0x2e, 0x55,
+ 0x7f, 0x42, 0xfb, 0x3f, 0x7a, 0xec, 0x8f, 0xe0, 0x54, 0x71, 0x44, 0xaa, 0x14, 0x15, 0xf7, 0x5f,
+ 0x70, 0xaa, 0x66, 0xec, 0x38, 0x38, 0x09, 0x6a, 0xc4, 0x21, 0x52, 0xe6, 0xbd, 0xf7, 0x7d, 0xef,
+ 0xcd, 0x37, 0xdf, 0x8c, 0xc1, 0xd3, 0xf6, 0x5d, 0xa6, 0x39, 0x54, 0x6f, 0x77, 0x9b, 0x24, 0xf0,
+ 0x08, 0x27, 0x4c, 0xef, 0x11, 0xcf, 0xa2, 0x81, 0x1e, 0x27, 0xb0, 0xef, 0x88, 0x1f, 0x23, 0x41,
+ 0x8f, 0x04, 0x8e, 0xc7, 0x49, 0xe0, 0xe1, 0x8e, 0xde, 0xdb, 0xc0, 0x1d, 0xbf, 0x85, 0x37, 0x74,
+ 0x9b, 0x78, 0x24, 0xc0, 0x9c, 0x58, 0x9a, 0x1f, 0x50, 0x4e, 0xe1, 0x5a, 0x04, 0xd3, 0xb0, 0xef,
+ 0x68, 0x63, 0x30, 0x6d, 0x00, 0xab, 0xfc, 0x6b, 0x3b, 0xbc, 0xd5, 0x6d, 0x6a, 0x26, 0x75, 0x75,
+ 0x9b, 0xda, 0x54, 0x97, 0xe8, 0x66, 0xf7, 0xb5, 0x5c, 0xc9, 0x85, 0xfc, 0x17, 0xb1, 0x56, 0xfe,
+ 0x1f, 0x0e, 0xe3, 0x62, 0xb3, 0xe5, 0x78, 0x24, 0x38, 0xd5, 0xfd, 0xb6, 0x2d, 0x27, 0xd3, 0x5d,
+ 0xc2, 0xb1, 0xde, 0x1b, 0x9b, 0xa5, 0xa2, 0xdf, 0x84, 0x0a, 0xba, 0x1e, 0x77, 0x5c, 0x32, 0x06,
+ 0xb8, 0xf3, 0x23, 0x00, 0x33, 0x5b, 0xc4, 0xc5, 0xa3, 0xb8, 0xda, 0x87, 0x19, 0xb0, 0xd4, 0x90,
+ 0x3b, 0x6d, 0x70, 0x1a, 0x60, 0x9b, 0x3c, 0x27, 0x01, 0x73, 0xa8, 0x07, 0x37, 0x41, 0x01, 0xfb,
+ 0x4e, 0x94, 0xda, 0xdf, 0x2d, 0x2b, 0xab, 0xca, 0xfa, 0x82, 0xf1, 0xeb, 0x59, 0xbf, 0x9a, 0x09,
+ 0xfb, 0xd5, 0xc2, 0xf6, 0xb3, 0xfd, 0x41, 0x0a, 0x5d, 0xaf, 0x83, 0xdb, 0xa0, 0x44, 0x3c, 0x93,
+ 0x5a, 0x8e, 0x67, 0xc7, 0x4c, 0xe5, 0x19, 0x09, 0x5d, 0x89, 0xa1, 0xa5, 0xbd, 0x74, 0x1a, 0x8d,
+ 0xd6, 0xc3, 0x1d, 0xb0, 0x68, 0x11, 0x93, 0x5a, 0xb8, 0xd9, 0x19, 0x4c, 0xc3, 0xca, 0xd9, 0xd5,
+ 0xec, 0xfa, 0x82, 0xb1, 0x1c, 0xf6, 0xab, 0x8b, 0xbb, 0xa3, 0x49, 0x34, 0x5e, 0x0f, 0xef, 0x81,
+ 0xa2, 0x3c, 0x40, 0x2b, 0x61, 0xc8, 0x49, 0x06, 0x18, 0xf6, 0xab, 0xc5, 0x46, 0x2a, 0x83, 0x46,
+ 0x2a, 0x6b, 0x9f, 0x66, 0x40, 0x71, 0x44, 0x8d, 0x57, 0x60, 0x5e, 0x1c, 0x95, 0x85, 0x39, 0x96,
+ 0x52, 0x14, 0xea, 0xff, 0x69, 0x43, 0xbb, 0x24, 0x8a, 0x6b, 0x7e, 0xdb, 0x96, 0xde, 0xd1, 0x44,
+ 0xb5, 0xd6, 0xdb, 0xd0, 0x8e, 0x9a, 0x6f, 0x88, 0xc9, 0x0f, 0x09, 0xc7, 0x06, 0x8c, 0x15, 0x00,
+ 0xc3, 0x18, 0x4a, 0x58, 0xe1, 0x0b, 0x90, 0x63, 0x3e, 0x31, 0xa5, 0x5a, 0x85, 0xfa, 0x96, 0x36,
+ 0x95, 0x19, 0xb5, 0xf4, 0x98, 0x0d, 0x9f, 0x98, 0xc6, 0x4f, 0x71, 0x9b, 0x9c, 0x58, 0x21, 0x49,
+ 0x0a, 0x4d, 0x90, 0x67, 0x1c, 0xf3, 0xae, 0xd0, 0x51, 0xd0, 0xdf, 0xbf, 0x1d, 0xbd, 0xa4, 0x30,
+ 0x8a, 0x71, 0x83, 0x7c, 0xb4, 0x46, 0x31, 0x75, 0xed, 0x63, 0x16, 0xac, 0xa4, 0x01, 0x3b, 0xd4,
+ 0xb3, 0x1c, 0x2e, 0xf4, 0x7b, 0x08, 0x72, 0xfc, 0xd4, 0x27, 0xb1, 0x8d, 0xfe, 0x19, 0x8c, 0x78,
+ 0x7c, 0xea, 0x93, 0xab, 0x7e, 0xf5, 0xf7, 0x1b, 0x60, 0x22, 0x8d, 0x24, 0x10, 0x6e, 0x25, 0x3b,
+ 0x88, 0xec, 0xf4, 0x47, 0x7a, 0x88, 0xab, 0x7e, 0xb5, 0x94, 0xc0, 0xd2, 0x73, 0xc1, 0xc7, 0x00,
+ 0xd2, 0x66, 0x74, 0xc4, 0x8f, 0x22, 0xf7, 0x0b, 0x57, 0x0a, 0x21, 0xb2, 0x46, 0x25, 0xa6, 0x81,
+ 0x47, 0x63, 0x15, 0x68, 0x02, 0x0a, 0xf6, 0x00, 0xec, 0x60, 0xc6, 0x8f, 0x03, 0xec, 0xb1, 0x68,
+ 0x44, 0xc7, 0x25, 0xe5, 0x9c, 0x14, 0xf5, 0xef, 0xe9, 0x1c, 0x21, 0x10, 0xc3, 0xbe, 0x07, 0x63,
+ 0x6c, 0x68, 0x42, 0x07, 0xf8, 0x27, 0xc8, 0x07, 0x04, 0x33, 0xea, 0x95, 0x67, 0xe5, 0xf6, 0x93,
+ 0x33, 0x40, 0x32, 0x8a, 0xe2, 0x2c, 0xfc, 0x0b, 0xcc, 0xb9, 0x84, 0x31, 0x6c, 0x93, 0x72, 0x5e,
+ 0x16, 0x96, 0xe2, 0xc2, 0xb9, 0xc3, 0x28, 0x8c, 0x06, 0xf9, 0xda, 0x67, 0x05, 0xc0, 0xb4, 0xee,
+ 0x07, 0x0e, 0xe3, 0xf0, 0xe5, 0x98, 0xd3, 0xb5, 0xe9, 0xf6, 0x25, 0xd0, 0xd2, 0xe7, 0xbf, 0xc4,
+ 0x2d, 0xe7, 0x07, 0x91, 0x6b, 0x2e, 0x3f, 0x01, 0xb3, 0x0e, 0x27, 0xae, 0x38, 0xc5, 0xec, 0x7a,
+ 0xa1, 0xbe, 0x79, 0x2b, 0x1f, 0x1a, 0x3f, 0xc7, 0x1d, 0x66, 0xf7, 0x05, 0x17, 0x8a, 0x28, 0x6b,
+ 0x4b, 0xa3, 0xfb, 0x11, 0x17, 0xa0, 0xf6, 0x45, 0x3c, 0x70, 0x13, 0x6c, 0x0c, 0xdf, 0x82, 0x12,
+ 0x4b, 0xc5, 0x59, 0x59, 0x91, 0x43, 0x4d, 0x7d, 0x39, 0x26, 0x3c, 0x9b, 0xc3, 0x67, 0x2e, 0x1d,
+ 0x67, 0x68, 0xb4, 0x19, 0x3c, 0x02, 0xcb, 0x26, 0x75, 0x5d, 0xea, 0xed, 0x4d, 0x7c, 0x2f, 0x7f,
+ 0x0b, 0xfb, 0xd5, 0xe5, 0x9d, 0x49, 0x05, 0x68, 0x32, 0x0e, 0x06, 0x00, 0x98, 0x83, 0x2b, 0x10,
+ 0x3d, 0x98, 0x85, 0xfa, 0x83, 0x5b, 0x09, 0x9c, 0xdc, 0xa4, 0xe1, 0x9b, 0x95, 0x84, 0x18, 0xba,
+ 0xd6, 0xc5, 0x78, 0x72, 0x76, 0xa9, 0x66, 0xce, 0x2f, 0xd5, 0xcc, 0xc5, 0xa5, 0x9a, 0x79, 0x17,
+ 0xaa, 0xca, 0x59, 0xa8, 0x2a, 0xe7, 0xa1, 0xaa, 0x5c, 0x84, 0xaa, 0xf2, 0x35, 0x54, 0x95, 0xf7,
+ 0xdf, 0xd4, 0xcc, 0xc9, 0xda, 0x54, 0x1f, 0xe4, 0xef, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x3a,
+ 0x2e, 0x07, 0xd1, 0x07, 0x00, 0x00,
}
func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) {
@@ -296,6 +298,15 @@ func (m *ServerStorageVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if len(m.ServedVersions) > 0 {
+ for iNdEx := len(m.ServedVersions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ServedVersions[iNdEx])
+ copy(dAtA[i:], m.ServedVersions[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServedVersions[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
if len(m.DecodableVersions) > 0 {
for iNdEx := len(m.DecodableVersions) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.DecodableVersions[iNdEx])
@@ -582,6 +593,12 @@ func (m *ServerStorageVersion) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if len(m.ServedVersions) > 0 {
+ for _, s := range m.ServedVersions {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -685,6 +702,7 @@ func (this *ServerStorageVersion) String() string {
`APIServerID:` + fmt.Sprintf("%v", this.APIServerID) + `,`,
`EncodingVersion:` + fmt.Sprintf("%v", this.EncodingVersion) + `,`,
`DecodableVersions:` + fmt.Sprintf("%v", this.DecodableVersions) + `,`,
+ `ServedVersions:` + fmt.Sprintf("%v", this.ServedVersions) + `,`,
`}`,
}, "")
return s
@@ -896,6 +914,38 @@ func (m *ServerStorageVersion) Unmarshal(dAtA []byte) error {
}
m.DecodableVersions = append(m.DecodableVersions, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServedVersions", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServedVersions = append(m.ServedVersions, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
index 63c45d54d7..6e6bab5218 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
@@ -42,6 +42,11 @@ message ServerStorageVersion {
// The encodingVersion must be included in the decodableVersions.
// +listType=set
repeated string decodableVersions = 3;
+
+ // The API server can serve these versions.
+ // DecodableVersions must include all ServedVersions.
+ // +listType=set
+ repeated string servedVersions = 4;
}
// Storage version of a specific resource.
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go
index a0437b5074..0ffcf95f06 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go
@@ -77,6 +77,11 @@ type ServerStorageVersion struct {
// The encodingVersion must be included in the decodableVersions.
// +listType=set
DecodableVersions []string `json:"decodableVersions,omitempty" protobuf:"bytes,3,opt,name=decodableVersions"`
+
+ // The API server can serve these versions.
+ // DecodableVersions must include all ServedVersions.
+ // +listType=set
+ ServedVersions []string `json:"servedVersions,omitempty" protobuf:"bytes,4,opt,name=servedVersions"`
}
type StorageVersionConditionType string
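
A minimal sketch showing the new ServedVersions field in use; per the field comment above, every served version must also appear in DecodableVersions. The server ID and version strings below are hypothetical.

package main

import (
	"fmt"

	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
)

func main() {
	ssv := apiserverinternalv1alpha1.ServerStorageVersion{
		APIServerID:       "kube-apiserver-1",
		EncodingVersion:   "v1",
		DecodableVersions: []string{"v1", "v1beta1"},
		// ServedVersions is new in this hunk; it must be a subset of DecodableVersions.
		ServedVersions: []string{"v1"},
	}
	fmt.Printf("%+v\n", ssv)
}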
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go
index 3b75fa65bc..6fd1c3ebe8 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go
@@ -32,6 +32,7 @@ var map_ServerStorageVersion = map[string]string{
"apiServerID": "The ID of the reporting API server.",
"encodingVersion": "The API server encodes the object to this version when persisting it in the backend (e.g., etcd).",
"decodableVersions": "The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions.",
+ "servedVersions": "The API server can serve these versions. DecodableVersions must include all ServedVersions.",
}
func (ServerStorageVersion) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go
index 44dffa7512..638d801402 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.deepcopy.go
@@ -33,6 +33,11 @@ func (in *ServerStorageVersion) DeepCopyInto(out *ServerStorageVersion) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.ServedVersions != nil {
+ in, out := &in.ServedVersions, &out.ServedVersions
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
return
}
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
index 15dc3150a6..644d368fe4 100644
--- a/vendor/k8s.io/api/apps/v1/types.go
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -17,7 +17,7 @@ limitations under the License.
package v1
import (
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -29,6 +29,7 @@ const (
DeprecatedRollbackTo = "deprecated.deployment.rollback.to"
DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation"
StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name"
+ PodIndexLabel = "apps.kubernetes.io/pod-index"
)
// +genclient
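
A minimal sketch of how the new PodIndexLabel constant might be used from a test or controller, assuming a standard client-go clientset built from the local kubeconfig; the namespace and ordinal below are hypothetical.

package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Select only the pod with ordinal index 0 of a StatefulSet, using the label key added above.
	pods, err := cs.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{
		LabelSelector: appsv1.PodIndexLabel + "=0",
	})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Println(p.Name)
	}
}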
diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go
index efbecf02c5..304bbd0744 100644
--- a/vendor/k8s.io/api/authentication/v1/generated.pb.go
+++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go
@@ -102,10 +102,66 @@ func (m *ExtraValue) XXX_DiscardUnknown() {
var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
+func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} }
+func (*SelfSubjectReview) ProtoMessage() {}
+func (*SelfSubjectReview) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2953ea822e7ffe1e, []int{2}
+}
+func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SelfSubjectReview) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SelfSubjectReview.Merge(m, src)
+}
+func (m *SelfSubjectReview) XXX_Size() int {
+ return m.Size()
+}
+func (m *SelfSubjectReview) XXX_DiscardUnknown() {
+ xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo
+
+func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} }
+func (*SelfSubjectReviewStatus) ProtoMessage() {}
+func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2953ea822e7ffe1e, []int{3}
+}
+func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src)
+}
+func (m *SelfSubjectReviewStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo
+
func (m *TokenRequest) Reset() { *m = TokenRequest{} }
func (*TokenRequest) ProtoMessage() {}
func (*TokenRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_2953ea822e7ffe1e, []int{2}
+ return fileDescriptor_2953ea822e7ffe1e, []int{4}
}
func (m *TokenRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -133,7 +189,7 @@ var xxx_messageInfo_TokenRequest proto.InternalMessageInfo
func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} }
func (*TokenRequestSpec) ProtoMessage() {}
func (*TokenRequestSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2953ea822e7ffe1e, []int{3}
+ return fileDescriptor_2953ea822e7ffe1e, []int{5}
}
func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -161,7 +217,7 @@ var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo
func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} }
func (*TokenRequestStatus) ProtoMessage() {}
func (*TokenRequestStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2953ea822e7ffe1e, []int{4}
+ return fileDescriptor_2953ea822e7ffe1e, []int{6}
}
func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -189,7 +245,7 @@ var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo
func (m *TokenReview) Reset() { *m = TokenReview{} }
func (*TokenReview) ProtoMessage() {}
func (*TokenReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_2953ea822e7ffe1e, []int{5}
+ return fileDescriptor_2953ea822e7ffe1e, []int{7}
}
func (m *TokenReview) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -217,7 +273,7 @@ var xxx_messageInfo_TokenReview proto.InternalMessageInfo
func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} }
func (*TokenReviewSpec) ProtoMessage() {}
func (*TokenReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2953ea822e7ffe1e, []int{6}
+ return fileDescriptor_2953ea822e7ffe1e, []int{8}
}
func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -245,7 +301,7 @@ var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo
func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} }
func (*TokenReviewStatus) ProtoMessage() {}
func (*TokenReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2953ea822e7ffe1e, []int{7}
+ return fileDescriptor_2953ea822e7ffe1e, []int{9}
}
func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -273,7 +329,7 @@ var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo
func (m *UserInfo) Reset() { *m = UserInfo{} }
func (*UserInfo) ProtoMessage() {}
func (*UserInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_2953ea822e7ffe1e, []int{8}
+ return fileDescriptor_2953ea822e7ffe1e, []int{10}
}
func (m *UserInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -301,6 +357,8 @@ var xxx_messageInfo_UserInfo proto.InternalMessageInfo
func init() {
proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference")
proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue")
+ proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1.SelfSubjectReview")
+ proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1.SelfSubjectReviewStatus")
proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest")
proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec")
proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus")
@@ -316,64 +374,67 @@ func init() {
}
var fileDescriptor_2953ea822e7ffe1e = []byte{
- // 907 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
- 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x96, 0xa4, 0x78, 0x25,
- 0x54, 0x01, 0x6b, 0x6f, 0x23, 0x04, 0xab, 0x45, 0x42, 0xaa, 0x69, 0x04, 0x11, 0x82, 0x5d, 0xcd,
- 0x6e, 0x0b, 0xe2, 0xc4, 0xc4, 0x7e, 0x4d, 0x87, 0xe0, 0xb1, 0xb1, 0xc7, 0x61, 0x73, 0xdb, 0x3f,
- 0x81, 0x23, 0x48, 0x1c, 0xf8, 0x23, 0x90, 0xf8, 0x17, 0x7a, 0x5c, 0x71, 0xea, 0x01, 0x45, 0xd4,
- 0x5c, 0x39, 0x72, 0xe2, 0x84, 0x66, 0x3c, 0xad, 0xe3, 0xa4, 0x4d, 0x73, 0xe2, 0x96, 0x79, 0xef,
- 0x7b, 0xdf, 0xbc, 0xf7, 0xcd, 0xe7, 0x99, 0xa0, 0xde, 0xe8, 0x61, 0x6c, 0xb1, 0xc0, 0x1e, 0x25,
- 0x03, 0x88, 0x38, 0x08, 0x88, 0xed, 0x31, 0x70, 0x2f, 0x88, 0x6c, 0x9d, 0xa0, 0x21, 0xb3, 0x69,
- 0x22, 0x4e, 0x80, 0x0b, 0xe6, 0x52, 0xc1, 0x02, 0x6e, 0x8f, 0xf7, 0xec, 0x21, 0x70, 0x88, 0xa8,
- 0x00, 0xcf, 0x0a, 0xa3, 0x40, 0x04, 0xf8, 0x6e, 0x86, 0xb6, 0x68, 0xc8, 0xac, 0x22, 0xda, 0x1a,
- 0xef, 0x6d, 0xdf, 0x1f, 0x32, 0x71, 0x92, 0x0c, 0x2c, 0x37, 0xf0, 0xed, 0x61, 0x30, 0x0c, 0x6c,
- 0x55, 0x34, 0x48, 0x8e, 0xd5, 0x4a, 0x2d, 0xd4, 0xaf, 0x8c, 0x6c, 0xfb, 0xdd, 0x7c, 0x6b, 0x9f,
- 0xba, 0x27, 0x8c, 0x43, 0x34, 0xb1, 0xc3, 0xd1, 0x50, 0x06, 0x62, 0xdb, 0x07, 0x41, 0xaf, 0x68,
- 0x61, 0xdb, 0xbe, 0xae, 0x2a, 0x4a, 0xb8, 0x60, 0x3e, 0x2c, 0x14, 0xbc, 0x77, 0x53, 0x41, 0xec,
- 0x9e, 0x80, 0x4f, 0xe7, 0xeb, 0xcc, 0xdf, 0x0d, 0xf4, 0xaa, 0x13, 0x24, 0xdc, 0x7b, 0x3c, 0xf8,
- 0x06, 0x5c, 0x41, 0xe0, 0x18, 0x22, 0xe0, 0x2e, 0xe0, 0x1d, 0x54, 0x1d, 0x31, 0xee, 0xb5, 0x8c,
- 0x1d, 0x63, 0xb7, 0xe1, 0xdc, 0x3a, 0x9d, 0x76, 0x4a, 0xe9, 0xb4, 0x53, 0xfd, 0x94, 0x71, 0x8f,
- 0xa8, 0x0c, 0xee, 0x22, 0x44, 0x43, 0x76, 0x04, 0x51, 0xcc, 0x02, 0xde, 0x2a, 0x2b, 0x1c, 0xd6,
- 0x38, 0xb4, 0xff, 0xa4, 0xaf, 0x33, 0x64, 0x06, 0x25, 0x59, 0x39, 0xf5, 0xa1, 0x55, 0x29, 0xb2,
- 0x7e, 0x4e, 0x7d, 0x20, 0x2a, 0x83, 0x1d, 0x54, 0x49, 0xfa, 0x07, 0xad, 0xaa, 0x02, 0x3c, 0xd0,
- 0x80, 0xca, 0x61, 0xff, 0xe0, 0xdf, 0x69, 0xe7, 0x8d, 0xeb, 0x86, 0x14, 0x93, 0x10, 0x62, 0xeb,
- 0xb0, 0x7f, 0x40, 0x64, 0xb1, 0xf9, 0x3e, 0x42, 0xbd, 0xe7, 0x22, 0xa2, 0x47, 0xf4, 0xdb, 0x04,
- 0x70, 0x07, 0xd5, 0x98, 0x00, 0x3f, 0x6e, 0x19, 0x3b, 0x95, 0xdd, 0x86, 0xd3, 0x48, 0xa7, 0x9d,
- 0x5a, 0x5f, 0x06, 0x48, 0x16, 0x7f, 0x54, 0xff, 0xf1, 0x97, 0x4e, 0xe9, 0xc5, 0x1f, 0x3b, 0x25,
- 0xf3, 0xe7, 0x32, 0xba, 0xf5, 0x2c, 0x18, 0x01, 0x27, 0xf0, 0x5d, 0x02, 0xb1, 0xc0, 0x5f, 0xa3,
- 0xba, 0x3c, 0x22, 0x8f, 0x0a, 0xaa, 0x94, 0x68, 0x76, 0x1f, 0x58, 0xb9, 0x3b, 0x2e, 0x9b, 0xb0,
- 0xc2, 0xd1, 0x50, 0x06, 0x62, 0x4b, 0xa2, 0xad, 0xf1, 0x9e, 0x95, 0xc9, 0xf9, 0x19, 0x08, 0x9a,
- 0x6b, 0x92, 0xc7, 0xc8, 0x25, 0x2b, 0x7e, 0x82, 0xaa, 0x71, 0x08, 0xae, 0xd2, 0xaf, 0xd9, 0xb5,
- 0xac, 0x65, 0xde, 0xb3, 0x66, 0x7b, 0x7b, 0x1a, 0x82, 0x9b, 0x2b, 0x28, 0x57, 0x44, 0x31, 0xe1,
- 0x2f, 0xd1, 0x5a, 0x2c, 0xa8, 0x48, 0x62, 0xa5, 0x72, 0xb1, 0xe3, 0x9b, 0x38, 0x55, 0x9d, 0xb3,
- 0xa1, 0x59, 0xd7, 0xb2, 0x35, 0xd1, 0x7c, 0xe6, 0x3f, 0x06, 0xda, 0x9c, 0x6f, 0x01, 0xbf, 0x8d,
- 0x1a, 0x34, 0xf1, 0x98, 0x34, 0xcd, 0x85, 0xc4, 0xeb, 0xe9, 0xb4, 0xd3, 0xd8, 0xbf, 0x08, 0x92,
- 0x3c, 0x8f, 0x3f, 0x42, 0x5b, 0xf0, 0x3c, 0x64, 0x91, 0xda, 0xfd, 0x29, 0xb8, 0x01, 0xf7, 0x62,
- 0x75, 0xd6, 0x15, 0xe7, 0x4e, 0x3a, 0xed, 0x6c, 0xf5, 0xe6, 0x93, 0x64, 0x11, 0x8f, 0x39, 0xda,
- 0x18, 0x14, 0x2c, 0xab, 0x07, 0xed, 0x2e, 0x1f, 0xf4, 0x2a, 0x9b, 0x3b, 0x38, 0x9d, 0x76, 0x36,
- 0x8a, 0x19, 0x32, 0xc7, 0x6e, 0xfe, 0x6a, 0x20, 0xbc, 0xa8, 0x12, 0xbe, 0x87, 0x6a, 0x42, 0x46,
- 0xf5, 0x27, 0xb2, 0xae, 0x45, 0xab, 0x65, 0xd0, 0x2c, 0x87, 0x27, 0xe8, 0x76, 0x3e, 0xc0, 0x33,
- 0xe6, 0x43, 0x2c, 0xa8, 0x1f, 0xea, 0xd3, 0x7e, 0x6b, 0x35, 0x2f, 0xc9, 0x32, 0xe7, 0x35, 0x4d,
- 0x7f, 0xbb, 0xb7, 0x48, 0x47, 0xae, 0xda, 0xc3, 0xfc, 0xa9, 0x8c, 0x9a, 0xba, 0xed, 0x31, 0x83,
- 0xef, 0xff, 0x07, 0x2f, 0x3f, 0x2e, 0x78, 0xf9, 0xfe, 0x4a, 0xbe, 0x93, 0xad, 0x5d, 0x6b, 0xe5,
- 0x2f, 0xe6, 0xac, 0x6c, 0xaf, 0x4e, 0xb9, 0xdc, 0xc9, 0x2e, 0x7a, 0x65, 0x6e, 0xff, 0xd5, 0x8e,
- 0xb3, 0x60, 0xf6, 0xf2, 0x72, 0xb3, 0x9b, 0x7f, 0x1b, 0x68, 0x6b, 0xa1, 0x25, 0xfc, 0x01, 0x5a,
- 0x9f, 0xe9, 0x1c, 0xb2, 0x1b, 0xb6, 0xee, 0xdc, 0xd1, 0xfb, 0xad, 0xef, 0xcf, 0x26, 0x49, 0x11,
- 0x8b, 0x3f, 0x41, 0xd5, 0x24, 0x86, 0x48, 0x2b, 0xfc, 0xe6, 0x72, 0x39, 0x0e, 0x63, 0x88, 0xfa,
- 0xfc, 0x38, 0xc8, 0xa5, 0x95, 0x11, 0xa2, 0x18, 0x8a, 0x93, 0x54, 0x6f, 0xf8, 0x6c, 0xef, 0xa1,
- 0x1a, 0x44, 0x51, 0x10, 0xe9, 0x7b, 0xfb, 0x52, 0x9b, 0x9e, 0x0c, 0x92, 0x2c, 0x67, 0xfe, 0x56,
- 0x46, 0xf5, 0x8b, 0x2d, 0xf1, 0x3b, 0xa8, 0x2e, 0xb7, 0x51, 0x97, 0x7d, 0x26, 0xe8, 0xa6, 0x2e,
- 0x52, 0x18, 0x19, 0x27, 0x97, 0x08, 0xfc, 0x3a, 0xaa, 0x24, 0xcc, 0xd3, 0x6f, 0x48, 0x73, 0xe6,
- 0xd2, 0x27, 0x32, 0x8e, 0x4d, 0xb4, 0x36, 0x8c, 0x82, 0x24, 0x94, 0x36, 0x90, 0x8d, 0x22, 0x79,
- 0xa2, 0x1f, 0xab, 0x08, 0xd1, 0x19, 0x7c, 0x84, 0x6a, 0x20, 0xef, 0x7c, 0x35, 0x4b, 0xb3, 0xbb,
- 0xb7, 0x9a, 0x34, 0x96, 0x7a, 0x27, 0x7a, 0x5c, 0x44, 0x93, 0x99, 0xa9, 0x64, 0x8c, 0x64, 0x74,
- 0xdb, 0x03, 0xfd, 0x96, 0x28, 0x0c, 0xde, 0x44, 0x95, 0x11, 0x4c, 0xb2, 0x89, 0x88, 0xfc, 0x89,
- 0x3f, 0x44, 0xb5, 0xb1, 0x7c, 0x66, 0xf4, 0x91, 0xec, 0x2e, 0xdf, 0x37, 0x7f, 0x96, 0x48, 0x56,
- 0xf6, 0xa8, 0xfc, 0xd0, 0x70, 0x9c, 0xd3, 0xf3, 0x76, 0xe9, 0xe5, 0x79, 0xbb, 0x74, 0x76, 0xde,
- 0x2e, 0xbd, 0x48, 0xdb, 0xc6, 0x69, 0xda, 0x36, 0x5e, 0xa6, 0x6d, 0xe3, 0x2c, 0x6d, 0x1b, 0x7f,
- 0xa6, 0x6d, 0xe3, 0x87, 0xbf, 0xda, 0xa5, 0xaf, 0xee, 0x2e, 0xfb, 0x13, 0xf3, 0x5f, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x12, 0xb8, 0x31, 0x91, 0xfc, 0x08, 0x00, 0x00,
+ // 958 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0x23, 0x45,
+ 0x10, 0xf6, 0xf8, 0x11, 0xd9, 0xe5, 0x4d, 0x48, 0x7a, 0x59, 0x61, 0x85, 0xc5, 0x0e, 0xb3, 0x12,
+ 0x8a, 0x80, 0x9d, 0xd9, 0x58, 0x3c, 0x56, 0x8b, 0x84, 0x94, 0x21, 0x16, 0x58, 0x08, 0x76, 0xd5,
+ 0x4e, 0x02, 0x42, 0x42, 0xa2, 0x3d, 0xae, 0x38, 0x83, 0x77, 0x1e, 0xcc, 0xf4, 0x98, 0xf5, 0x6d,
+ 0x7f, 0x02, 0x47, 0x90, 0x38, 0xf0, 0x23, 0x90, 0xf8, 0x0b, 0x39, 0xae, 0x10, 0x87, 0x3d, 0x20,
+ 0x8b, 0x0c, 0x57, 0x8e, 0x9c, 0x38, 0xa1, 0xee, 0xe9, 0xf8, 0x99, 0x4c, 0x7c, 0xda, 0x9b, 0xa7,
+ 0x1e, 0x5f, 0x55, 0x7d, 0x55, 0x5d, 0x65, 0x68, 0x0d, 0xee, 0x47, 0x86, 0xe3, 0x9b, 0x83, 0xb8,
+ 0x8b, 0xa1, 0x87, 0x1c, 0x23, 0x73, 0x88, 0x5e, 0xcf, 0x0f, 0x4d, 0xa5, 0x60, 0x81, 0x63, 0xb2,
+ 0x98, 0x9f, 0xa2, 0xc7, 0x1d, 0x9b, 0x71, 0xc7, 0xf7, 0xcc, 0xe1, 0x9e, 0xd9, 0x47, 0x0f, 0x43,
+ 0xc6, 0xb1, 0x67, 0x04, 0xa1, 0xcf, 0x7d, 0x72, 0x3b, 0xb5, 0x36, 0x58, 0xe0, 0x18, 0xf3, 0xd6,
+ 0xc6, 0x70, 0x6f, 0xfb, 0x6e, 0xdf, 0xe1, 0xa7, 0x71, 0xd7, 0xb0, 0x7d, 0xd7, 0xec, 0xfb, 0x7d,
+ 0xdf, 0x94, 0x4e, 0xdd, 0xf8, 0x44, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0x05, 0xdb, 0x7e, 0x67, 0x1a,
+ 0xda, 0x65, 0xf6, 0xa9, 0xe3, 0x61, 0x38, 0x32, 0x83, 0x41, 0x5f, 0x08, 0x22, 0xd3, 0x45, 0xce,
+ 0x2e, 0x49, 0x61, 0xdb, 0xbc, 0xca, 0x2b, 0x8c, 0x3d, 0xee, 0xb8, 0xb8, 0xe4, 0xf0, 0xde, 0x75,
+ 0x0e, 0x91, 0x7d, 0x8a, 0x2e, 0x5b, 0xf4, 0xd3, 0x7f, 0xd7, 0xe0, 0x65, 0xcb, 0x8f, 0xbd, 0xde,
+ 0xc3, 0xee, 0xb7, 0x68, 0x73, 0x8a, 0x27, 0x18, 0xa2, 0x67, 0x23, 0xd9, 0x81, 0xe2, 0xc0, 0xf1,
+ 0x7a, 0x35, 0x6d, 0x47, 0xdb, 0xad, 0x58, 0x37, 0xce, 0xc6, 0x8d, 0x5c, 0x32, 0x6e, 0x14, 0x3f,
+ 0x75, 0xbc, 0x1e, 0x95, 0x1a, 0xd2, 0x04, 0x60, 0x81, 0x73, 0x8c, 0x61, 0xe4, 0xf8, 0x5e, 0x2d,
+ 0x2f, 0xed, 0x88, 0xb2, 0x83, 0xfd, 0x47, 0x6d, 0xa5, 0xa1, 0x33, 0x56, 0x02, 0xd5, 0x63, 0x2e,
+ 0xd6, 0x0a, 0xf3, 0xa8, 0x9f, 0x33, 0x17, 0xa9, 0xd4, 0x10, 0x0b, 0x0a, 0x71, 0xfb, 0xa0, 0x56,
+ 0x94, 0x06, 0xf7, 0x94, 0x41, 0xe1, 0xa8, 0x7d, 0xf0, 0xdf, 0xb8, 0xf1, 0xfa, 0x55, 0x45, 0xf2,
+ 0x51, 0x80, 0x91, 0x71, 0xd4, 0x3e, 0xa0, 0xc2, 0x59, 0x7f, 0x1f, 0xa0, 0xf5, 0x84, 0x87, 0xec,
+ 0x98, 0x3d, 0x8e, 0x91, 0x34, 0xa0, 0xe4, 0x70, 0x74, 0xa3, 0x9a, 0xb6, 0x53, 0xd8, 0xad, 0x58,
+ 0x95, 0x64, 0xdc, 0x28, 0xb5, 0x85, 0x80, 0xa6, 0xf2, 0x07, 0xe5, 0x1f, 0x7f, 0x69, 0xe4, 0x9e,
+ 0xfe, 0xb9, 0x93, 0xd3, 0xff, 0xd0, 0x60, 0xab, 0x83, 0x8f, 0x4f, 0x3a, 0xb1, 0x62, 0x63, 0xe8,
+ 0xe0, 0xf7, 0xe4, 0x1b, 0x28, 0x8b, 0x3e, 0xf5, 0x18, 0x67, 0x92, 0x8e, 0x6a, 0xf3, 0x9e, 0x31,
+ 0x1d, 0x91, 0x49, 0x26, 0x46, 0x30, 0xe8, 0x0b, 0x41, 0x64, 0x08, 0x6b, 0x63, 0xb8, 0x67, 0xa4,
+ 0x9c, 0x7e, 0x86, 0x9c, 0x4d, 0x89, 0x99, 0xca, 0xe8, 0x04, 0x95, 0x7c, 0x0d, 0x6b, 0x11, 0x67,
+ 0x3c, 0x8e, 0x24, 0x8d, 0xd5, 0xe6, 0xbb, 0x46, 0xd6, 0x08, 0x1a, 0x4b, 0x29, 0x76, 0xa4, 0xb3,
+ 0xb5, 0xa1, 0x82, 0xac, 0xa5, 0xdf, 0x54, 0x81, 0xea, 0x3e, 0xbc, 0x72, 0x85, 0x0b, 0x39, 0x84,
+ 0x72, 0x1c, 0x61, 0xd8, 0xf6, 0x4e, 0x7c, 0x55, 0xdb, 0x1b, 0xd9, 0xb1, 0x8f, 0x94, 0xb5, 0xb5,
+ 0xa9, 0x82, 0x95, 0x2f, 0x24, 0x74, 0x82, 0xa4, 0xff, 0x9c, 0x87, 0x1b, 0x87, 0xfe, 0x00, 0x3d,
+ 0x8a, 0xdf, 0xc5, 0x18, 0xf1, 0x17, 0x40, 0xe1, 0x23, 0x28, 0x46, 0x01, 0xda, 0x8a, 0x40, 0x23,
+ 0xbb, 0x88, 0xd9, 0xdc, 0x3a, 0x01, 0xda, 0xd3, 0x49, 0x14, 0x5f, 0x54, 0x22, 0x91, 0x2f, 0x27,
+ 0x4d, 0x29, 0x2c, 0x65, 0x7c, 0x1d, 0x66, 0x76, 0x3f, 0xfe, 0xd5, 0x60, 0x73, 0x31, 0x05, 0xf2,
+ 0x16, 0x54, 0x58, 0xdc, 0x73, 0xc4, 0xe3, 0xbb, 0x18, 0xd5, 0xf5, 0x64, 0xdc, 0xa8, 0xec, 0x5f,
+ 0x08, 0xe9, 0x54, 0x4f, 0x3e, 0x82, 0x2d, 0x7c, 0x12, 0x38, 0xa1, 0x8c, 0xde, 0x41, 0xdb, 0xf7,
+ 0x7a, 0x91, 0x7c, 0x33, 0x05, 0xeb, 0x56, 0x32, 0x6e, 0x6c, 0xb5, 0x16, 0x95, 0x74, 0xd9, 0x9e,
+ 0x78, 0xb0, 0xd1, 0x9d, 0x7b, 0xfa, 0xaa, 0xd0, 0x66, 0x76, 0xa1, 0x97, 0xad, 0x0b, 0x8b, 0x24,
+ 0xe3, 0xc6, 0xc6, 0xbc, 0x86, 0x2e, 0xa0, 0xeb, 0xbf, 0x6a, 0x40, 0x96, 0x59, 0x22, 0x77, 0xa0,
+ 0xc4, 0x85, 0x54, 0xad, 0x9a, 0x75, 0x45, 0x5a, 0x29, 0x35, 0x4d, 0x75, 0x64, 0x04, 0x37, 0xa7,
+ 0x05, 0x1c, 0x3a, 0x2e, 0x46, 0x9c, 0xb9, 0x81, 0xea, 0xf6, 0x9b, 0xab, 0xcd, 0x92, 0x70, 0xb3,
+ 0x5e, 0x55, 0xf0, 0x37, 0x5b, 0xcb, 0x70, 0xf4, 0xb2, 0x18, 0xfa, 0x4f, 0x79, 0xa8, 0xaa, 0xb4,
+ 0x5f, 0xd0, 0x3a, 0x78, 0x38, 0x37, 0xcb, 0x77, 0x57, 0x9a, 0x3b, 0xf9, 0xa6, 0xaf, 0x1a, 0xe5,
+ 0x2f, 0x16, 0x46, 0xd9, 0x5c, 0x1d, 0x32, 0x7b, 0x92, 0x6d, 0x78, 0x69, 0x21, 0xfe, 0x6a, 0xed,
+ 0x9c, 0x1b, 0xf6, 0x7c, 0xf6, 0xb0, 0xeb, 0xff, 0x68, 0xb0, 0xb5, 0x94, 0x12, 0xf9, 0x00, 0xd6,
+ 0x67, 0x32, 0xc7, 0xf4, 0x52, 0x95, 0xad, 0x5b, 0x2a, 0xde, 0xfa, 0xfe, 0xac, 0x92, 0xce, 0xdb,
+ 0x92, 0x4f, 0xa0, 0x28, 0x96, 0x95, 0x62, 0x78, 0xd5, 0x95, 0x37, 0xa1, 0x56, 0x48, 0xa8, 0x44,
+ 0x98, 0xaf, 0xa4, 0x78, 0xcd, 0xb3, 0xbd, 0x03, 0x25, 0x0c, 0x43, 0x3f, 0x54, 0xf7, 0x6f, 0xc2,
+ 0x4d, 0x4b, 0x08, 0x69, 0xaa, 0xd3, 0x7f, 0xcb, 0xc3, 0x64, 0xa7, 0x92, 0xb7, 0xd3, 0xfd, 0x2c,
+ 0x8f, 0x66, 0x4a, 0xe8, 0xdc, 0xde, 0x15, 0x72, 0x3a, 0xb1, 0x20, 0xaf, 0x41, 0x21, 0x76, 0x7a,
+ 0xea, 0x16, 0x57, 0x67, 0x8e, 0x27, 0x15, 0x72, 0xa2, 0xc3, 0x5a, 0x3f, 0xf4, 0xe3, 0x40, 0x8c,
+ 0x81, 0x48, 0x14, 0x44, 0x47, 0x3f, 0x96, 0x12, 0xaa, 0x34, 0xe4, 0x18, 0x4a, 0x28, 0x6e, 0xa7,
+ 0xac, 0xa5, 0xda, 0xdc, 0x5b, 0x8d, 0x1a, 0x43, 0xde, 0xdb, 0x96, 0xc7, 0xc3, 0xd1, 0x4c, 0x55,
+ 0x42, 0x46, 0x53, 0xb8, 0xed, 0xae, 0xba, 0xc9, 0xd2, 0x86, 0x6c, 0x42, 0x61, 0x80, 0xa3, 0xb4,
+ 0x22, 0x2a, 0x7e, 0x92, 0x0f, 0xa1, 0x34, 0x14, 0xe7, 0x5a, 0xb5, 0x64, 0x37, 0x3b, 0xee, 0xf4,
+ 0xbc, 0xd3, 0xd4, 0xed, 0x41, 0xfe, 0xbe, 0x66, 0x59, 0x67, 0xe7, 0xf5, 0xdc, 0xb3, 0xf3, 0x7a,
+ 0xee, 0xf9, 0x79, 0x3d, 0xf7, 0x34, 0xa9, 0x6b, 0x67, 0x49, 0x5d, 0x7b, 0x96, 0xd4, 0xb5, 0xe7,
+ 0x49, 0x5d, 0xfb, 0x2b, 0xa9, 0x6b, 0x3f, 0xfc, 0x5d, 0xcf, 0x7d, 0x75, 0x3b, 0xeb, 0xcf, 0xe0,
+ 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x9a, 0x38, 0x17, 0x44, 0x0a, 0x00, 0x00,
}
func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) {
@@ -451,6 +512,82 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *TokenRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -850,6 +987,30 @@ func (m ExtraValue) Size() (n int) {
return n
}
+func (m *SelfSubjectReview) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *SelfSubjectReviewStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.UserInfo.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *TokenRequest) Size() (n int) {
if m == nil {
return 0
@@ -999,6 +1160,27 @@ func (this *BoundObjectReference) String() string {
}, "")
return s
}
+func (this *SelfSubjectReview) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SelfSubjectReview{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *SelfSubjectReviewStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&SelfSubjectReviewStatus{`,
+ `UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *TokenRequest) String() string {
if this == nil {
return "nil"
@@ -1361,6 +1543,205 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *TokenRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto
index f4806a3c63..1632070c87 100644
--- a/vendor/k8s.io/api/authentication/v1/generated.proto
+++ b/vendor/k8s.io/api/authentication/v1/generated.proto
@@ -56,6 +56,26 @@ message ExtraValue {
repeated string items = 1;
}
+// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request.
+// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or
+// request header authentication is used, any extra keys will have their case ignored and returned as lowercase.
+message SelfSubjectReview {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Status is filled in by the server with the user attributes.
+ optional SelfSubjectReviewStatus status = 2;
+}
+
+// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.
+message SelfSubjectReviewStatus {
+ // User attributes of the user making this request.
+ // +optional
+ optional UserInfo userInfo = 1;
+}
+
// TokenRequest requests a token for a given service account.
message TokenRequest {
// Standard object's metadata.
diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go
index c522e4a46d..6a32b5926b 100644
--- a/vendor/k8s.io/api/authentication/v1/register.go
+++ b/vendor/k8s.io/api/authentication/v1/register.go
@@ -46,6 +46,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&TokenReview{},
&TokenRequest{},
+ &SelfSubjectReview{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go
index 4e221e58c7..b498007c00 100644
--- a/vendor/k8s.io/api/authentication/v1/types.go
+++ b/vendor/k8s.io/api/authentication/v1/types.go
@@ -197,3 +197,28 @@ type BoundObjectReference struct {
// +optional
UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:onlyVerbs=create
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request.
+// When using impersonation, users will receive the user info of the user being impersonated. If impersonation or
+// request header authentication is used, any extra keys will have their case ignored and returned as lowercase.
+type SelfSubjectReview struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Status is filled in by the server with the user attributes.
+ Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+}
+
+// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.
+type SelfSubjectReviewStatus struct {
+ // User attributes of the user making this request.
+ // +optional
+ UserInfo UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"`
+}
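
A minimal sketch of calling the new SelfSubjectReview API, assuming a client-go version matched to this vendored k8s.io/api (v0.28 or later) so that AuthenticationV1().SelfSubjectReviews() is available; an empty object is created and the server fills in Status with the caller's identity.

package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Create an empty SelfSubjectReview; the server fills Status with the caller's user info.
	rev, err := cs.AuthenticationV1().SelfSubjectReviews().Create(
		context.TODO(), &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated as:", rev.Status.UserInfo.Username)
}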
diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
index b1a730b816..ebfd4852c0 100644
--- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
@@ -39,6 +39,25 @@ func (BoundObjectReference) SwaggerDoc() map[string]string {
return map_BoundObjectReference
}
+var map_SelfSubjectReview = map[string]string{
+ "": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "status": "Status is filled in by the server with the user attributes.",
+}
+
+func (SelfSubjectReview) SwaggerDoc() map[string]string {
+ return map_SelfSubjectReview
+}
+
+var map_SelfSubjectReviewStatus = map[string]string{
+ "": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.",
+ "userInfo": "User attributes of the user making this request.",
+}
+
+func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string {
+ return map_SelfSubjectReviewStatus
+}
+
var map_TokenRequest = map[string]string{
"": "TokenRequest requests a token for a given service account.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
index 2af533191b..369c89b863 100644
--- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
@@ -61,6 +61,50 @@ func (in ExtraValue) DeepCopy() ExtraValue {
return *out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview.
+func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview {
+ if in == nil {
+ return nil
+ }
+ out := new(SelfSubjectReview)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SelfSubjectReview) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) {
+ *out = *in
+ in.UserInfo.DeepCopyInto(&out.UserInfo)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus.
+func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SelfSubjectReviewStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenRequest) DeepCopyInto(out *TokenRequest) {
*out = *in
diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go
index feafc23c2b..59a7482a0d 100644
--- a/vendor/k8s.io/api/batch/v1/generated.pb.go
+++ b/vendor/k8s.io/api/batch/v1/generated.pb.go
@@ -495,113 +495,120 @@ func init() {
}
var fileDescriptor_3b52da57c93de713 = []byte{
- // 1696 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xe3, 0x48,
- 0x15, 0x8f, 0xe2, 0xd8, 0xb1, 0xdb, 0xc9, 0xc4, 0xd3, 0xb3, 0x33, 0x63, 0xc2, 0x96, 0x95, 0xd5,
- 0xec, 0x6e, 0x65, 0xa9, 0x45, 0x66, 0xb2, 0x53, 0x2c, 0xff, 0x8b, 0x91, 0x87, 0x59, 0x26, 0x78,
- 0x36, 0xa6, 0x9d, 0x40, 0xd5, 0xb2, 0x50, 0xc8, 0x52, 0xdb, 0xd1, 0x46, 0x56, 0x1b, 0x75, 0x2b,
- 0xb5, 0xb9, 0x50, 0x54, 0xf1, 0x05, 0xe0, 0xc8, 0x17, 0xe0, 0xc8, 0x05, 0xce, 0x70, 0xa3, 0x72,
- 0xdc, 0xe2, 0xb4, 0xc5, 0x41, 0xc5, 0x88, 0x0f, 0xc0, 0x3d, 0x5c, 0xa8, 0x6e, 0xb5, 0xf5, 0xcf,
- 0x52, 0xc8, 0x6c, 0x15, 0x5b, 0xdc, 0xa2, 0xf7, 0x7e, 0xef, 0xd7, 0x4f, 0xfd, 0x9e, 0x7e, 0xef,
- 0xc5, 0xe0, 0x5b, 0x67, 0x5f, 0xa3, 0xba, 0x43, 0xfa, 0x67, 0xc1, 0x04, 0xfb, 0x1e, 0x66, 0x98,
- 0xf6, 0xcf, 0xb1, 0x67, 0x13, 0xbf, 0x2f, 0x1d, 0xe6, 0xc2, 0xe9, 0x4f, 0x4c, 0x66, 0x9d, 0xf6,
- 0xcf, 0x1f, 0xf6, 0x67, 0xd8, 0xc3, 0xbe, 0xc9, 0xb0, 0xad, 0x2f, 0x7c, 0xc2, 0x08, 0xbc, 0x13,
- 0x83, 0x74, 0x73, 0xe1, 0xe8, 0x02, 0xa4, 0x9f, 0x3f, 0xdc, 0xfd, 0xf2, 0xcc, 0x61, 0xa7, 0xc1,
- 0x44, 0xb7, 0xc8, 0xbc, 0x3f, 0x23, 0x33, 0xd2, 0x17, 0xd8, 0x49, 0x30, 0x15, 0x4f, 0xe2, 0x41,
- 0xfc, 0x15, 0x73, 0xec, 0x6a, 0x99, 0x83, 0x2c, 0xe2, 0xe3, 0x92, 0x73, 0x76, 0x1f, 0xa5, 0x98,
- 0xb9, 0x69, 0x9d, 0x3a, 0x1e, 0xf6, 0x2f, 0xfa, 0x8b, 0xb3, 0x19, 0x37, 0xd0, 0xfe, 0x1c, 0x33,
- 0xb3, 0x2c, 0xaa, 0x5f, 0x15, 0xe5, 0x07, 0x1e, 0x73, 0xe6, 0x78, 0x25, 0xe0, 0xab, 0xff, 0x2d,
- 0x80, 0x5a, 0xa7, 0x78, 0x6e, 0x16, 0xe3, 0xb4, 0x7f, 0x2b, 0x60, 0x73, 0xe0, 0x13, 0xef, 0x90,
- 0x4c, 0xe0, 0xcf, 0x41, 0x93, 0xe7, 0x63, 0x9b, 0xcc, 0xec, 0x2a, 0x7b, 0xca, 0x7e, 0xfb, 0xe0,
- 0x2b, 0x7a, 0x7a, 0x4b, 0x09, 0xad, 0xbe, 0x38, 0x9b, 0x71, 0x03, 0xd5, 0x39, 0x5a, 0x3f, 0x7f,
- 0xa8, 0x1f, 0x4d, 0x3e, 0xc2, 0x16, 0x7b, 0x8e, 0x99, 0x69, 0xc0, 0xcb, 0x50, 0x5d, 0x8b, 0x42,
- 0x15, 0xa4, 0x36, 0x94, 0xb0, 0x42, 0x03, 0x6c, 0xd0, 0x05, 0xb6, 0xba, 0xeb, 0x82, 0x7d, 0x4f,
- 0x2f, 0xa9, 0x81, 0x2e, 0xb3, 0x19, 0x2f, 0xb0, 0x65, 0x6c, 0x49, 0xb6, 0x0d, 0xfe, 0x84, 0x44,
- 0x2c, 0x3c, 0x04, 0x0d, 0xca, 0x4c, 0x16, 0xd0, 0x6e, 0x4d, 0xb0, 0x68, 0xd7, 0xb2, 0x08, 0xa4,
- 0x71, 0x4b, 0xf2, 0x34, 0xe2, 0x67, 0x24, 0x19, 0xb4, 0x3f, 0x28, 0xa0, 0x2d, 0x91, 0x43, 0x87,
- 0x32, 0xf8, 0xe1, 0xca, 0x0d, 0xe8, 0x37, 0xbb, 0x01, 0x1e, 0x2d, 0xde, 0xbf, 0x23, 0x4f, 0x6a,
- 0x2e, 0x2d, 0x99, 0xb7, 0x7f, 0x0c, 0xea, 0x0e, 0xc3, 0x73, 0xda, 0x5d, 0xdf, 0xab, 0xed, 0xb7,
- 0x0f, 0x5e, 0xbd, 0x2e, 0x71, 0x63, 0x5b, 0x12, 0xd5, 0x9f, 0xf1, 0x10, 0x14, 0x47, 0x6a, 0x7f,
- 0xdb, 0x48, 0x12, 0xe6, 0x57, 0x02, 0xdf, 0x06, 0x4d, 0x5e, 0x58, 0x3b, 0x70, 0xb1, 0x48, 0xb8,
- 0x95, 0x26, 0x30, 0x96, 0x76, 0x94, 0x20, 0xe0, 0x3e, 0x68, 0xf2, 0x5e, 0xf8, 0x80, 0x78, 0xb8,
- 0xdb, 0x14, 0xe8, 0x2d, 0x8e, 0x3c, 0x96, 0x36, 0x94, 0x78, 0xe1, 0x09, 0xb8, 0x4f, 0x99, 0xe9,
- 0x33, 0xc7, 0x9b, 0x3d, 0xc1, 0xa6, 0xed, 0x3a, 0x1e, 0x1e, 0x63, 0x8b, 0x78, 0x36, 0x15, 0xb5,
- 0xab, 0x19, 0x5f, 0x8c, 0x42, 0xf5, 0xfe, 0xb8, 0x1c, 0x82, 0xaa, 0x62, 0xe1, 0x87, 0xe0, 0xb6,
- 0x45, 0x3c, 0x2b, 0xf0, 0x7d, 0xec, 0x59, 0x17, 0x23, 0xe2, 0x3a, 0xd6, 0x85, 0x28, 0x63, 0xcb,
- 0xd0, 0x65, 0xde, 0xb7, 0x07, 0x45, 0xc0, 0x55, 0x99, 0x11, 0xad, 0x12, 0xc1, 0x37, 0xc0, 0x26,
- 0x0d, 0xe8, 0x02, 0x7b, 0x76, 0x77, 0x63, 0x4f, 0xd9, 0x6f, 0x1a, 0xed, 0x28, 0x54, 0x37, 0xc7,
- 0xb1, 0x09, 0x2d, 0x7d, 0xf0, 0x27, 0xa0, 0xfd, 0x11, 0x99, 0x1c, 0xe3, 0xf9, 0xc2, 0x35, 0x19,
- 0xee, 0xd6, 0x45, 0x9d, 0x5f, 0x2f, 0x2d, 0xc6, 0x61, 0x8a, 0x13, 0xfd, 0x78, 0x47, 0x26, 0xd9,
- 0xce, 0x38, 0x50, 0x96, 0x0d, 0xfe, 0x0c, 0xec, 0xd2, 0xc0, 0xb2, 0x30, 0xa5, 0xd3, 0xc0, 0x3d,
- 0x24, 0x13, 0xfa, 0x7d, 0x87, 0x32, 0xe2, 0x5f, 0x0c, 0x9d, 0xb9, 0xc3, 0xba, 0x8d, 0x3d, 0x65,
- 0xbf, 0x6e, 0xf4, 0xa2, 0x50, 0xdd, 0x1d, 0x57, 0xa2, 0xd0, 0x35, 0x0c, 0x10, 0x81, 0x7b, 0x53,
- 0xd3, 0x71, 0xb1, 0xbd, 0xc2, 0xbd, 0x29, 0xb8, 0x77, 0xa3, 0x50, 0xbd, 0xf7, 0xb4, 0x14, 0x81,
- 0x2a, 0x22, 0xb5, 0x3f, 0xaf, 0x83, 0xed, 0xdc, 0xf7, 0x02, 0x7f, 0x00, 0x1a, 0xa6, 0xc5, 0x9c,
- 0x73, 0xde, 0x54, 0xbc, 0x55, 0x1f, 0x64, 0x6f, 0x87, 0x2b, 0x5d, 0xfa, 0xd5, 0x23, 0x3c, 0xc5,
- 0xbc, 0x08, 0x38, 0xfd, 0xc8, 0x1e, 0x8b, 0x50, 0x24, 0x29, 0xa0, 0x0b, 0x3a, 0xae, 0x49, 0xd9,
- 0xb2, 0x1f, 0x79, 0xb7, 0x89, 0xfa, 0xb4, 0x0f, 0xbe, 0x74, 0xb3, 0x8f, 0x8b, 0x47, 0x18, 0xaf,
- 0x44, 0xa1, 0xda, 0x19, 0x16, 0x78, 0xd0, 0x0a, 0x33, 0xf4, 0x01, 0x14, 0xb6, 0xe4, 0x0a, 0xc5,
- 0x79, 0xf5, 0x97, 0x3e, 0xef, 0x5e, 0x14, 0xaa, 0x70, 0xb8, 0xc2, 0x84, 0x4a, 0xd8, 0xb5, 0x7f,
- 0x29, 0xa0, 0xf6, 0xf9, 0x08, 0xe8, 0x77, 0x72, 0x02, 0xfa, 0x6a, 0x55, 0xd3, 0x56, 0x8a, 0xe7,
- 0xd3, 0x82, 0x78, 0xf6, 0x2a, 0x19, 0xae, 0x17, 0xce, 0xbf, 0xd6, 0xc0, 0xd6, 0x21, 0x99, 0x0c,
- 0x88, 0x67, 0x3b, 0xcc, 0x21, 0x1e, 0x7c, 0x04, 0x36, 0xd8, 0xc5, 0x62, 0x29, 0x42, 0x7b, 0xcb,
- 0xa3, 0x8f, 0x2f, 0x16, 0xf8, 0x2a, 0x54, 0x3b, 0x59, 0x2c, 0xb7, 0x21, 0x81, 0x86, 0xc3, 0x24,
- 0x9d, 0x75, 0x11, 0xf7, 0x28, 0x7f, 0xdc, 0x55, 0xa8, 0x96, 0x8c, 0x58, 0x3d, 0x61, 0xca, 0x27,
- 0x05, 0x67, 0x60, 0x9b, 0x17, 0x67, 0xe4, 0x93, 0x49, 0xdc, 0x65, 0xb5, 0x97, 0xae, 0xfa, 0x5d,
- 0x99, 0xc0, 0xf6, 0x30, 0x4b, 0x84, 0xf2, 0xbc, 0xf0, 0x3c, 0xee, 0xb1, 0x63, 0xdf, 0xf4, 0x68,
- 0xfc, 0x4a, 0x9f, 0xad, 0xa7, 0x77, 0xe5, 0x69, 0xa2, 0xcf, 0xf2, 0x6c, 0xa8, 0xe4, 0x04, 0xf8,
- 0x26, 0x68, 0xf8, 0xd8, 0xa4, 0xc4, 0x13, 0xfd, 0xdc, 0x4a, 0xab, 0x83, 0x84, 0x15, 0x49, 0x2f,
- 0x7c, 0x0b, 0x6c, 0xce, 0x31, 0xa5, 0xe6, 0x0c, 0x0b, 0xc5, 0x69, 0x19, 0x3b, 0x12, 0xb8, 0xf9,
- 0x3c, 0x36, 0xa3, 0xa5, 0x5f, 0xfb, 0xbd, 0x02, 0x36, 0x3f, 0x9f, 0xe9, 0xf7, 0xed, 0xfc, 0xf4,
- 0xeb, 0x56, 0x75, 0x5e, 0xc5, 0xe4, 0xfb, 0x5d, 0x43, 0x24, 0x2a, 0xa6, 0xde, 0x43, 0xd0, 0x5e,
- 0x98, 0xbe, 0xe9, 0xba, 0xd8, 0x75, 0xe8, 0x5c, 0xe4, 0x5a, 0x37, 0x76, 0xb8, 0x2e, 0x8f, 0x52,
- 0x33, 0xca, 0x62, 0x78, 0x88, 0x45, 0xe6, 0x0b, 0x17, 0xf3, 0xcb, 0x8c, 0xdb, 0x4d, 0x86, 0x0c,
- 0x52, 0x33, 0xca, 0x62, 0xe0, 0x11, 0xb8, 0x1b, 0x2b, 0x58, 0x71, 0x02, 0xd6, 0xc4, 0x04, 0xfc,
- 0x42, 0x14, 0xaa, 0x77, 0x1f, 0x97, 0x01, 0x50, 0x79, 0x1c, 0x9c, 0x81, 0xce, 0x82, 0xd8, 0x5c,
- 0x9c, 0x03, 0x1f, 0xcb, 0xe1, 0xd7, 0x16, 0xf7, 0xfc, 0x46, 0xe9, 0x65, 0x8c, 0x0a, 0xe0, 0x58,
- 0x03, 0x8b, 0x56, 0xb4, 0x42, 0x0a, 0x1f, 0x81, 0xad, 0x89, 0x69, 0x9d, 0x91, 0xe9, 0x34, 0x3b,
- 0x1a, 0x3a, 0x51, 0xa8, 0x6e, 0x19, 0x19, 0x3b, 0xca, 0xa1, 0xe0, 0x4f, 0x41, 0x93, 0x62, 0x17,
- 0x5b, 0x8c, 0xf8, 0xb2, 0x97, 0xdf, 0xb9, 0x61, 0xf9, 0xcd, 0x09, 0x76, 0xc7, 0x32, 0x34, 0x5e,
- 0x29, 0x96, 0x4f, 0x28, 0xa1, 0x84, 0xdf, 0x00, 0xb7, 0xe6, 0xa6, 0x17, 0x98, 0x09, 0x52, 0x34,
- 0x71, 0xd3, 0x80, 0x51, 0xa8, 0xde, 0x7a, 0x9e, 0xf3, 0xa0, 0x02, 0x12, 0xfe, 0x10, 0x34, 0xd9,
- 0x72, 0x5e, 0x37, 0x44, 0x6a, 0xa5, 0x13, 0x69, 0x44, 0xec, 0xdc, 0xb8, 0x4e, 0xda, 0x31, 0x99,
- 0xd5, 0x09, 0x0d, 0xdf, 0x70, 0x18, 0x73, 0x65, 0x69, 0x1e, 0x4f, 0x19, 0xf6, 0x9f, 0x3a, 0x9e,
- 0x43, 0x4f, 0xb1, 0x2d, 0x56, 0xa3, 0x7a, 0xbc, 0xe1, 0x1c, 0x1f, 0x0f, 0xcb, 0x20, 0xa8, 0x2a,
- 0x16, 0x0e, 0xc1, 0xad, 0xb4, 0x87, 0x9e, 0x13, 0x1b, 0x77, 0x5b, 0xe2, 0x0b, 0x7c, 0x9d, 0xbf,
- 0xe5, 0x20, 0xe7, 0xb9, 0x5a, 0xb1, 0xa0, 0x42, 0x6c, 0x76, 0xa3, 0x01, 0xd5, 0x1b, 0x8d, 0xf6,
- 0xdb, 0x3a, 0x68, 0xa5, 0xc3, 0xfb, 0x04, 0x00, 0x6b, 0xa9, 0x90, 0x54, 0x0e, 0xf0, 0xd7, 0xaa,
- 0xbe, 0xb6, 0x44, 0x4b, 0xd3, 0xc1, 0x93, 0x98, 0x28, 0xca, 0x10, 0xc1, 0x1f, 0x83, 0x96, 0x58,
- 0xeb, 0x84, 0xd6, 0xad, 0xbf, 0xb4, 0xd6, 0x6d, 0x47, 0xa1, 0xda, 0x1a, 0x2f, 0x09, 0x50, 0xca,
- 0x05, 0xa7, 0xd9, 0x2b, 0xfb, 0x8c, 0xba, 0x0d, 0xf3, 0xd7, 0x2b, 0x8e, 0x28, 0xb0, 0x72, 0xf5,
- 0x94, 0x4b, 0xcd, 0x86, 0x28, 0x70, 0xd5, 0xbe, 0xd2, 0x07, 0x2d, 0xb1, 0x80, 0x61, 0x1b, 0xdb,
- 0xa2, 0x47, 0xeb, 0xc6, 0x6d, 0x09, 0x6d, 0x8d, 0x97, 0x0e, 0x94, 0x62, 0x38, 0x71, 0xbc, 0x59,
- 0xc9, 0xfd, 0x2e, 0x21, 0x8e, 0xf7, 0x30, 0x24, 0xbd, 0xf0, 0x09, 0xe8, 0xc8, 0x94, 0xb0, 0xfd,
- 0xcc, 0xb3, 0xf1, 0xc7, 0x98, 0x8a, 0x4f, 0xb3, 0x65, 0x74, 0x65, 0x44, 0x67, 0x50, 0xf0, 0xa3,
- 0x95, 0x08, 0xf8, 0x6b, 0x05, 0xdc, 0x0f, 0x3c, 0x8b, 0x04, 0x1e, 0xc3, 0xf6, 0x31, 0xf6, 0xe7,
- 0x8e, 0xc7, 0xff, 0x9f, 0x1b, 0x11, 0x9b, 0x8a, 0xce, 0x6d, 0x1f, 0xbc, 0x5d, 0x5a, 0xec, 0x93,
- 0xf2, 0x98, 0xb8, 0xcf, 0x2b, 0x9c, 0xa8, 0xea, 0x24, 0xa8, 0x82, 0xba, 0x8f, 0x4d, 0xfb, 0x42,
- 0xb4, 0x77, 0xdd, 0x68, 0x71, 0xbd, 0x46, 0xdc, 0x80, 0x62, 0xbb, 0xf6, 0x47, 0x05, 0xec, 0x14,
- 0xd6, 0xe7, 0xff, 0xff, 0xfd, 0x48, 0x9b, 0x80, 0x15, 0x7d, 0x85, 0xef, 0x83, 0xba, 0x1f, 0xb8,
- 0x78, 0xf9, 0x29, 0xbd, 0x75, 0x23, 0xad, 0x46, 0x81, 0x8b, 0xd3, 0x49, 0xc6, 0x9f, 0x28, 0x8a,
- 0x69, 0xb4, 0xbf, 0x2b, 0xe0, 0xcd, 0x22, 0xfc, 0xc8, 0xfb, 0xde, 0xc7, 0x0e, 0x1b, 0x10, 0x1b,
- 0x53, 0x84, 0x7f, 0x11, 0x38, 0x3e, 0x9e, 0x63, 0x8f, 0xc1, 0x77, 0xc1, 0xb6, 0x45, 0x3c, 0x66,
- 0xf2, 0x6b, 0x79, 0xdf, 0x9c, 0x2f, 0xd7, 0xab, 0xdb, 0x7c, 0x43, 0x19, 0x64, 0x1d, 0x28, 0x8f,
- 0x83, 0x63, 0xd0, 0x24, 0x0b, 0xfe, 0x8f, 0x3e, 0xf1, 0xe5, 0x6a, 0xf5, 0xee, 0x52, 0x0b, 0x8f,
- 0xa4, 0xfd, 0x2a, 0x54, 0x1f, 0x5c, 0x93, 0xc6, 0x12, 0x86, 0x12, 0x22, 0xa8, 0x81, 0xc6, 0xb9,
- 0xe9, 0x06, 0x98, 0x4f, 0xc0, 0xda, 0x7e, 0xdd, 0x00, 0xbc, 0xc7, 0x7f, 0x24, 0x2c, 0x48, 0x7a,
- 0xb4, 0xbf, 0x94, 0xbe, 0xdc, 0x88, 0xd8, 0xa9, 0xaa, 0x8c, 0x4c, 0xc6, 0xb0, 0xef, 0xc1, 0xf7,
- 0x72, 0x2b, 0xe3, 0x3b, 0x85, 0x95, 0xf1, 0x41, 0xc9, 0xe2, 0x97, 0xa5, 0xf9, 0x5f, 0x6d, 0x91,
- 0xda, 0xe5, 0x3a, 0x78, 0xa5, 0xac, 0x9a, 0xf0, 0xbb, 0xb1, 0x7e, 0x10, 0x4f, 0x66, 0xbc, 0x9f,
- 0xd5, 0x0f, 0xe2, 0x5d, 0x85, 0xea, 0xbd, 0x62, 0x5c, 0xec, 0x41, 0x32, 0x0e, 0x7a, 0xa0, 0x4d,
- 0xd2, 0x1b, 0x96, 0x4d, 0xfa, 0xcd, 0x1b, 0xf5, 0x53, 0x79, 0x83, 0xc4, 0x1b, 0x4c, 0xd6, 0x97,
- 0x3d, 0x00, 0xfe, 0x12, 0xec, 0x90, 0xfc, 0xdd, 0x8b, 0xca, 0xdd, 0xfc, 0xcc, 0xb2, 0xba, 0x19,
- 0xf7, 0xe5, 0x7b, 0xef, 0x14, 0xfc, 0xa8, 0x78, 0x98, 0xf6, 0x27, 0x05, 0x54, 0x29, 0x0b, 0x1c,
- 0x65, 0x55, 0x96, 0x7f, 0x59, 0x2d, 0xe3, 0x20, 0xa7, 0xb0, 0x57, 0xa1, 0xfa, 0x5a, 0xd5, 0x8f,
- 0x5a, 0xbc, 0xec, 0x54, 0x3f, 0x79, 0xf6, 0x24, 0x2b, 0xc3, 0xef, 0x25, 0x32, 0xbc, 0x2e, 0xe8,
- 0xfa, 0xa9, 0x04, 0xdf, 0x8c, 0x4b, 0x86, 0x1b, 0x5f, 0xbf, 0x7c, 0xd1, 0x5b, 0xfb, 0xe4, 0x45,
- 0x6f, 0xed, 0xd3, 0x17, 0xbd, 0xb5, 0x5f, 0x45, 0x3d, 0xe5, 0x32, 0xea, 0x29, 0x9f, 0x44, 0x3d,
- 0xe5, 0xd3, 0xa8, 0xa7, 0xfc, 0x23, 0xea, 0x29, 0xbf, 0xf9, 0x67, 0x6f, 0xed, 0x83, 0x3b, 0x25,
- 0xbf, 0x32, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x8e, 0x19, 0x59, 0x94, 0x14, 0x00, 0x00,
+ // 1797 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x23, 0x49,
+ 0x15, 0x8f, 0x93, 0x38, 0xb1, 0xcb, 0xf9, 0xf0, 0xd4, 0x64, 0x66, 0x4c, 0x58, 0xb9, 0xb3, 0x9e,
+ 0xdd, 0x55, 0x16, 0x2d, 0xed, 0x9d, 0xec, 0x88, 0xe5, 0x5b, 0x3b, 0x9d, 0x61, 0x96, 0x09, 0xce,
+ 0x8e, 0x29, 0x67, 0x40, 0x5a, 0x16, 0x44, 0xb9, 0xbb, 0xec, 0xf4, 0xa6, 0xdd, 0xd5, 0x74, 0x55,
+ 0x47, 0x93, 0x0b, 0x42, 0xe2, 0x0f, 0x80, 0xbf, 0x82, 0x23, 0x17, 0x38, 0xc3, 0x0d, 0xcd, 0x71,
+ 0xc5, 0x69, 0xc5, 0xa1, 0xc5, 0x34, 0x7f, 0x00, 0xf7, 0x20, 0x24, 0x54, 0xd5, 0xe5, 0xfe, 0x72,
+ 0x77, 0xc8, 0xac, 0xc4, 0x88, 0x5b, 0xfa, 0xbd, 0xdf, 0xfb, 0xd5, 0xc7, 0x7b, 0xf5, 0x7b, 0x2f,
+ 0x06, 0xdf, 0x3e, 0xfb, 0x3a, 0xd3, 0x6d, 0xda, 0x3f, 0x0b, 0xc6, 0xc4, 0x77, 0x09, 0x27, 0xac,
+ 0x7f, 0x4e, 0x5c, 0x8b, 0xfa, 0x7d, 0xe5, 0xc0, 0x9e, 0xdd, 0x1f, 0x63, 0x6e, 0x9e, 0xf6, 0xcf,
+ 0xef, 0xf5, 0xa7, 0xc4, 0x25, 0x3e, 0xe6, 0xc4, 0xd2, 0x3d, 0x9f, 0x72, 0x0a, 0x6f, 0xc6, 0x20,
+ 0x1d, 0x7b, 0xb6, 0x2e, 0x41, 0xfa, 0xf9, 0xbd, 0xdd, 0xaf, 0x4e, 0x6d, 0x7e, 0x1a, 0x8c, 0x75,
+ 0x93, 0xce, 0xfa, 0x53, 0x3a, 0xa5, 0x7d, 0x89, 0x1d, 0x07, 0x13, 0xf9, 0x25, 0x3f, 0xe4, 0x5f,
+ 0x31, 0xc7, 0x6e, 0x2f, 0xb3, 0x90, 0x49, 0x7d, 0x52, 0xb2, 0xce, 0xee, 0xfd, 0x14, 0x33, 0xc3,
+ 0xe6, 0xa9, 0xed, 0x12, 0xff, 0xa2, 0xef, 0x9d, 0x4d, 0x85, 0x81, 0xf5, 0x67, 0x84, 0xe3, 0xb2,
+ 0xa8, 0x7e, 0x55, 0x94, 0x1f, 0xb8, 0xdc, 0x9e, 0x91, 0x85, 0x80, 0xaf, 0xfd, 0xb7, 0x00, 0x66,
+ 0x9e, 0x92, 0x19, 0x2e, 0xc6, 0xf5, 0xfe, 0x55, 0x03, 0xeb, 0x87, 0x3e, 0x75, 0x8f, 0xe8, 0x18,
+ 0xfe, 0x1c, 0x34, 0xc4, 0x7e, 0x2c, 0xcc, 0x71, 0xa7, 0xb6, 0x57, 0xdb, 0x6f, 0x1d, 0xbc, 0xab,
+ 0xa7, 0xb7, 0x94, 0xd0, 0xea, 0xde, 0xd9, 0x54, 0x18, 0x98, 0x2e, 0xd0, 0xfa, 0xf9, 0x3d, 0xfd,
+ 0xc9, 0xf8, 0x53, 0x62, 0xf2, 0x63, 0xc2, 0xb1, 0x01, 0x9f, 0x87, 0xda, 0x52, 0x14, 0x6a, 0x20,
+ 0xb5, 0xa1, 0x84, 0x15, 0x1a, 0x60, 0x95, 0x79, 0xc4, 0xec, 0x2c, 0x4b, 0xf6, 0x3d, 0xbd, 0x24,
+ 0x07, 0xba, 0xda, 0xcd, 0xc8, 0x23, 0xa6, 0xb1, 0xa1, 0xd8, 0x56, 0xc5, 0x17, 0x92, 0xb1, 0xf0,
+ 0x08, 0xac, 0x31, 0x8e, 0x79, 0xc0, 0x3a, 0x2b, 0x92, 0xa5, 0x77, 0x25, 0x8b, 0x44, 0x1a, 0x5b,
+ 0x8a, 0x67, 0x2d, 0xfe, 0x46, 0x8a, 0xa1, 0xf7, 0xfb, 0x1a, 0x68, 0x29, 0xe4, 0xc0, 0x66, 0x1c,
+ 0x7e, 0xb2, 0x70, 0x03, 0xfa, 0xf5, 0x6e, 0x40, 0x44, 0xcb, 0xf3, 0xb7, 0xd5, 0x4a, 0x8d, 0xb9,
+ 0x25, 0x73, 0xfa, 0x07, 0xa0, 0x6e, 0x73, 0x32, 0x63, 0x9d, 0xe5, 0xbd, 0x95, 0xfd, 0xd6, 0xc1,
+ 0x6b, 0x57, 0x6d, 0xdc, 0xd8, 0x54, 0x44, 0xf5, 0xc7, 0x22, 0x04, 0xc5, 0x91, 0xbd, 0xbf, 0xae,
+ 0x26, 0x1b, 0x16, 0x57, 0x02, 0xdf, 0x01, 0x0d, 0x91, 0x58, 0x2b, 0x70, 0x88, 0xdc, 0x70, 0x33,
+ 0xdd, 0xc0, 0x48, 0xd9, 0x51, 0x82, 0x80, 0xfb, 0xa0, 0x21, 0x6a, 0xe1, 0x63, 0xea, 0x92, 0x4e,
+ 0x43, 0xa2, 0x37, 0x04, 0xf2, 0x44, 0xd9, 0x50, 0xe2, 0x85, 0x4f, 0xc1, 0x1d, 0xc6, 0xb1, 0xcf,
+ 0x6d, 0x77, 0xfa, 0x90, 0x60, 0xcb, 0xb1, 0x5d, 0x32, 0x22, 0x26, 0x75, 0x2d, 0x26, 0x73, 0xb7,
+ 0x62, 0x7c, 0x39, 0x0a, 0xb5, 0x3b, 0xa3, 0x72, 0x08, 0xaa, 0x8a, 0x85, 0x9f, 0x80, 0x1b, 0x26,
+ 0x75, 0xcd, 0xc0, 0xf7, 0x89, 0x6b, 0x5e, 0x0c, 0xa9, 0x63, 0x9b, 0x17, 0x32, 0x8d, 0x4d, 0x43,
+ 0x57, 0xfb, 0xbe, 0x71, 0x58, 0x04, 0x5c, 0x96, 0x19, 0xd1, 0x22, 0x11, 0x7c, 0x13, 0xac, 0xb3,
+ 0x80, 0x79, 0xc4, 0xb5, 0x3a, 0xab, 0x7b, 0xb5, 0xfd, 0x86, 0xd1, 0x8a, 0x42, 0x6d, 0x7d, 0x14,
+ 0x9b, 0xd0, 0xdc, 0x07, 0x7f, 0x02, 0x5a, 0x9f, 0xd2, 0xf1, 0x09, 0x99, 0x79, 0x0e, 0xe6, 0xa4,
+ 0x53, 0x97, 0x79, 0x7e, 0xa3, 0x34, 0x19, 0x47, 0x29, 0x4e, 0xd6, 0xe3, 0x4d, 0xb5, 0xc9, 0x56,
+ 0xc6, 0x81, 0xb2, 0x6c, 0xf0, 0x67, 0x60, 0x97, 0x05, 0xa6, 0x49, 0x18, 0x9b, 0x04, 0xce, 0x11,
+ 0x1d, 0xb3, 0xef, 0xdb, 0x8c, 0x53, 0xff, 0x62, 0x60, 0xcf, 0x6c, 0xde, 0x59, 0xdb, 0xab, 0xed,
+ 0xd7, 0x8d, 0x6e, 0x14, 0x6a, 0xbb, 0xa3, 0x4a, 0x14, 0xba, 0x82, 0x01, 0x22, 0x70, 0x7b, 0x82,
+ 0x6d, 0x87, 0x58, 0x0b, 0xdc, 0xeb, 0x92, 0x7b, 0x37, 0x0a, 0xb5, 0xdb, 0x8f, 0x4a, 0x11, 0xa8,
+ 0x22, 0xb2, 0xf7, 0xa7, 0x65, 0xb0, 0x99, 0x7b, 0x2f, 0xf0, 0x07, 0x60, 0x0d, 0x9b, 0xdc, 0x3e,
+ 0x17, 0x45, 0x25, 0x4a, 0xf5, 0x6e, 0xf6, 0x76, 0x84, 0xd2, 0xa5, 0xaf, 0x1e, 0x91, 0x09, 0x11,
+ 0x49, 0x20, 0xe9, 0x23, 0x7b, 0x20, 0x43, 0x91, 0xa2, 0x80, 0x0e, 0x68, 0x3b, 0x98, 0xf1, 0x79,
+ 0x3d, 0x8a, 0x6a, 0x93, 0xf9, 0x69, 0x1d, 0x7c, 0xe5, 0x7a, 0x8f, 0x4b, 0x44, 0x18, 0x3b, 0x51,
+ 0xa8, 0xb5, 0x07, 0x05, 0x1e, 0xb4, 0xc0, 0x0c, 0x7d, 0x00, 0xa5, 0x2d, 0xb9, 0x42, 0xb9, 0x5e,
+ 0xfd, 0xa5, 0xd7, 0xbb, 0x1d, 0x85, 0x1a, 0x1c, 0x2c, 0x30, 0xa1, 0x12, 0xf6, 0xde, 0x3f, 0x6b,
+ 0x60, 0xe5, 0xd5, 0x08, 0xe8, 0x77, 0x73, 0x02, 0xfa, 0x5a, 0x55, 0xd1, 0x56, 0x8a, 0xe7, 0xa3,
+ 0x82, 0x78, 0x76, 0x2b, 0x19, 0xae, 0x16, 0xce, 0xbf, 0xac, 0x80, 0x8d, 0x23, 0x3a, 0x3e, 0xa4,
+ 0xae, 0x65, 0x73, 0x9b, 0xba, 0xf0, 0x3e, 0x58, 0xe5, 0x17, 0xde, 0x5c, 0x84, 0xf6, 0xe6, 0x4b,
+ 0x9f, 0x5c, 0x78, 0xe4, 0x32, 0xd4, 0xda, 0x59, 0xac, 0xb0, 0x21, 0x89, 0x86, 0x83, 0x64, 0x3b,
+ 0xcb, 0x32, 0xee, 0x7e, 0x7e, 0xb9, 0xcb, 0x50, 0x2b, 0x69, 0xb1, 0x7a, 0xc2, 0x94, 0xdf, 0x14,
+ 0x9c, 0x82, 0x4d, 0x91, 0x9c, 0xa1, 0x4f, 0xc7, 0x71, 0x95, 0xad, 0xbc, 0x74, 0xd6, 0x6f, 0xa9,
+ 0x0d, 0x6c, 0x0e, 0xb2, 0x44, 0x28, 0xcf, 0x0b, 0xcf, 0xe3, 0x1a, 0x3b, 0xf1, 0xb1, 0xcb, 0xe2,
+ 0x23, 0x7d, 0xb1, 0x9a, 0xde, 0x55, 0xab, 0xc9, 0x3a, 0xcb, 0xb3, 0xa1, 0x92, 0x15, 0xe0, 0x5b,
+ 0x60, 0xcd, 0x27, 0x98, 0x51, 0x57, 0xd6, 0x73, 0x33, 0xcd, 0x0e, 0x92, 0x56, 0xa4, 0xbc, 0xf0,
+ 0x6d, 0xb0, 0x3e, 0x23, 0x8c, 0xe1, 0x29, 0x91, 0x8a, 0xd3, 0x34, 0xb6, 0x15, 0x70, 0xfd, 0x38,
+ 0x36, 0xa3, 0xb9, 0xbf, 0xf7, 0xbb, 0x1a, 0x58, 0x7f, 0x35, 0xdd, 0xef, 0x3b, 0xf9, 0xee, 0xd7,
+ 0xa9, 0xaa, 0xbc, 0x8a, 0xce, 0xf7, 0x9b, 0x86, 0xdc, 0xa8, 0xec, 0x7a, 0xf7, 0x40, 0xcb, 0xc3,
+ 0x3e, 0x76, 0x1c, 0xe2, 0xd8, 0x6c, 0x26, 0xf7, 0x5a, 0x37, 0xb6, 0x85, 0x2e, 0x0f, 0x53, 0x33,
+ 0xca, 0x62, 0x44, 0x88, 0x49, 0x67, 0x9e, 0x43, 0xc4, 0x65, 0xc6, 0xe5, 0xa6, 0x42, 0x0e, 0x53,
+ 0x33, 0xca, 0x62, 0xe0, 0x13, 0x70, 0x2b, 0x56, 0xb0, 0x62, 0x07, 0x5c, 0x91, 0x1d, 0xf0, 0x4b,
+ 0x51, 0xa8, 0xdd, 0x7a, 0x50, 0x06, 0x40, 0xe5, 0x71, 0x70, 0x0a, 0xda, 0x1e, 0xb5, 0x84, 0x38,
+ 0x07, 0x3e, 0x51, 0xcd, 0xaf, 0x25, 0xef, 0xf9, 0xcd, 0xd2, 0xcb, 0x18, 0x16, 0xc0, 0xb1, 0x06,
+ 0x16, 0xad, 0x68, 0x81, 0x14, 0xde, 0x07, 0x1b, 0x63, 0x6c, 0x9e, 0xd1, 0xc9, 0x24, 0xdb, 0x1a,
+ 0xda, 0x51, 0xa8, 0x6d, 0x18, 0x19, 0x3b, 0xca, 0xa1, 0xe0, 0x00, 0xec, 0x64, 0xbf, 0x87, 0xc4,
+ 0x7f, 0xec, 0x5a, 0xe4, 0x59, 0x67, 0x43, 0x46, 0x77, 0xa2, 0x50, 0xdb, 0x31, 0x4a, 0xfc, 0xa8,
+ 0x34, 0x0a, 0x7e, 0x00, 0xda, 0x33, 0xfc, 0x2c, 0xee, 0x44, 0xd2, 0x42, 0x58, 0x67, 0x53, 0x32,
+ 0xc9, 0x53, 0x1c, 0x17, 0x7c, 0x68, 0x01, 0x0d, 0x7f, 0x0a, 0x1a, 0x8c, 0x38, 0xc4, 0xe4, 0xd4,
+ 0x57, 0x6f, 0xeb, 0xbd, 0x6b, 0x96, 0x23, 0x1e, 0x13, 0x67, 0xa4, 0x42, 0xe3, 0x11, 0x67, 0xfe,
+ 0x85, 0x12, 0x4a, 0xf8, 0x4d, 0xb0, 0x35, 0xc3, 0x6e, 0x80, 0x13, 0xa4, 0x7c, 0x54, 0x0d, 0x03,
+ 0x46, 0xa1, 0xb6, 0x75, 0x9c, 0xf3, 0xa0, 0x02, 0x12, 0xfe, 0x10, 0x34, 0xf8, 0x7c, 0x7e, 0x58,
+ 0x93, 0x5b, 0x2b, 0xed, 0x90, 0x43, 0x6a, 0xe5, 0xc6, 0x87, 0xe4, 0x79, 0x24, 0xb3, 0x43, 0x42,
+ 0x23, 0x26, 0x2e, 0xce, 0x1d, 0x55, 0x2a, 0x0f, 0x26, 0x9c, 0xf8, 0x8f, 0x6c, 0xd7, 0x66, 0xa7,
+ 0xc4, 0x92, 0xa3, 0x5a, 0x3d, 0x9e, 0xb8, 0x4e, 0x4e, 0x06, 0x65, 0x10, 0x54, 0x15, 0x0b, 0x07,
+ 0x60, 0x2b, 0xad, 0xe9, 0x63, 0x6a, 0x91, 0x4e, 0x53, 0x2a, 0xc2, 0x1b, 0xe2, 0x94, 0x87, 0x39,
+ 0xcf, 0xe5, 0x82, 0x05, 0x15, 0x62, 0xb3, 0x13, 0x16, 0xb8, 0x62, 0xc2, 0xb2, 0xc0, 0x8e, 0x47,
+ 0x2d, 0x44, 0x3c, 0x07, 0x9b, 0x64, 0x46, 0x5c, 0xae, 0x8a, 0x7d, 0x4b, 0x2e, 0xfd, 0xae, 0xa8,
+ 0xa4, 0x61, 0x89, 0xff, 0xb2, 0xc2, 0x8e, 0x4a, 0xd9, 0x7a, 0xff, 0xae, 0x83, 0x66, 0x3a, 0xb2,
+ 0x3c, 0x05, 0xc0, 0x9c, 0xf7, 0x05, 0xa6, 0xc6, 0x96, 0xd7, 0xab, 0x34, 0x26, 0xe9, 0x20, 0x69,
+ 0xbb, 0x4d, 0x4c, 0x0c, 0x65, 0x88, 0xe0, 0x8f, 0x41, 0x53, 0x0e, 0xb3, 0x52, 0xe1, 0x97, 0x5f,
+ 0x5a, 0xe1, 0x37, 0xa3, 0x50, 0x6b, 0x8e, 0xe6, 0x04, 0x28, 0xe5, 0x82, 0x93, 0x6c, 0x62, 0xbe,
+ 0x60, 0xb7, 0x82, 0xf9, 0x24, 0xca, 0x25, 0x0a, 0xac, 0xa2, 0x67, 0xa8, 0x51, 0x6e, 0x55, 0x96,
+ 0x51, 0xd5, 0x94, 0xd6, 0x07, 0x4d, 0x39, 0x76, 0x12, 0x8b, 0x58, 0xf2, 0x25, 0xd4, 0x8d, 0x1b,
+ 0x0a, 0xda, 0x1c, 0xcd, 0x1d, 0x28, 0xc5, 0x08, 0xe2, 0x78, 0x9e, 0x54, 0x53, 0x6d, 0x42, 0x1c,
+ 0xbf, 0x62, 0xa4, 0xbc, 0x42, 0x79, 0x39, 0xf1, 0x67, 0xb6, 0x8b, 0xc5, 0x7f, 0x04, 0x52, 0xf0,
+ 0x94, 0xf2, 0x9e, 0xa4, 0x66, 0x94, 0xc5, 0xc0, 0x87, 0xa0, 0xad, 0x4e, 0x91, 0x6a, 0xc7, 0xba,
+ 0xac, 0x9d, 0x8e, 0x5a, 0xa4, 0x7d, 0x58, 0xf0, 0xa3, 0x85, 0x08, 0xf8, 0x3e, 0xd8, 0x9c, 0xe4,
+ 0xe4, 0x07, 0x48, 0x8a, 0x1b, 0xa2, 0xbd, 0xe7, 0xb5, 0x27, 0x8f, 0x83, 0xbf, 0xae, 0x81, 0x3b,
+ 0x81, 0x6b, 0xd2, 0xc0, 0xe5, 0xc4, 0x9a, 0x6f, 0x92, 0x58, 0x43, 0x6a, 0x31, 0xf9, 0x16, 0x5b,
+ 0x07, 0xef, 0x94, 0x16, 0xd6, 0xd3, 0xf2, 0x98, 0xf8, 0xe5, 0x56, 0x38, 0x51, 0xd5, 0x4a, 0x50,
+ 0x03, 0x75, 0x9f, 0x60, 0xeb, 0x42, 0x3e, 0xd8, 0xba, 0xd1, 0x14, 0x1d, 0x11, 0x09, 0x03, 0x8a,
+ 0xed, 0xbd, 0x3f, 0xd4, 0xc0, 0x76, 0xe1, 0x1f, 0x94, 0xff, 0xff, 0x09, 0xb4, 0x37, 0x06, 0x0b,
+ 0x1d, 0x0c, 0x7e, 0x04, 0xea, 0x7e, 0xe0, 0x90, 0xf9, 0xb3, 0x7d, 0xfb, 0x5a, 0xdd, 0x10, 0x05,
+ 0x0e, 0x49, 0x67, 0x05, 0xf1, 0xc5, 0x50, 0x4c, 0xd3, 0xfb, 0x5b, 0x0d, 0xbc, 0x55, 0x84, 0x3f,
+ 0x71, 0xbf, 0xf7, 0xcc, 0xe6, 0x87, 0xd4, 0x22, 0x0c, 0x91, 0x5f, 0x04, 0xb6, 0x2f, 0xa5, 0x44,
+ 0x14, 0x89, 0x49, 0x5d, 0x8e, 0xc5, 0xb5, 0x7c, 0x84, 0x67, 0xf3, 0x01, 0x56, 0x16, 0xc9, 0x61,
+ 0xd6, 0x81, 0xf2, 0x38, 0x38, 0x02, 0x0d, 0xea, 0x11, 0x1f, 0x8b, 0xc6, 0x11, 0x0f, 0xaf, 0xef,
+ 0xcf, 0xd5, 0xfd, 0x89, 0xb2, 0x5f, 0x86, 0xda, 0xdd, 0x2b, 0xb6, 0x31, 0x87, 0xa1, 0x84, 0x08,
+ 0xf6, 0xc0, 0xda, 0x39, 0x76, 0x02, 0x22, 0x66, 0x8c, 0x95, 0xfd, 0xba, 0x01, 0xc4, 0x7b, 0xfa,
+ 0x91, 0xb4, 0x20, 0xe5, 0xe9, 0xfd, 0xb9, 0xf4, 0x70, 0x43, 0x6a, 0xa5, 0x0a, 0x36, 0xc4, 0x9c,
+ 0x13, 0xdf, 0x85, 0x1f, 0xe6, 0x86, 0xf2, 0xf7, 0x0a, 0x43, 0xf9, 0xdd, 0x92, 0xd1, 0x3a, 0x4b,
+ 0xf3, 0xbf, 0x9a, 0xd3, 0x7b, 0xcf, 0x97, 0xc1, 0x4e, 0x59, 0x36, 0xe1, 0x07, 0xb1, 0x56, 0x51,
+ 0x57, 0xed, 0x78, 0x3f, 0xab, 0x55, 0xd4, 0xbd, 0x0c, 0xb5, 0xdb, 0xc5, 0xb8, 0xd8, 0x83, 0x54,
+ 0x1c, 0x74, 0x41, 0x8b, 0xa6, 0x37, 0xac, 0x8a, 0xf4, 0x5b, 0xd7, 0xaa, 0xa7, 0xf2, 0x02, 0x89,
+ 0x95, 0x2a, 0xeb, 0xcb, 0x2e, 0x00, 0x7f, 0x09, 0xb6, 0x69, 0xfe, 0xee, 0x65, 0xe6, 0xae, 0xbf,
+ 0x66, 0x59, 0xde, 0x8c, 0x3b, 0xea, 0xdc, 0xdb, 0x05, 0x3f, 0x2a, 0x2e, 0xd6, 0xfb, 0x63, 0x0d,
+ 0x54, 0x29, 0x0b, 0x1c, 0x66, 0x15, 0x5d, 0xbc, 0xac, 0xa6, 0x71, 0x90, 0x53, 0xf3, 0xcb, 0x50,
+ 0x7b, 0xbd, 0xea, 0x67, 0x43, 0x91, 0x76, 0xa6, 0x3f, 0x7d, 0xfc, 0x30, 0x2b, 0xf9, 0x1f, 0x26,
+ 0x92, 0xbf, 0x2c, 0xe9, 0xfa, 0xa9, 0xdc, 0x5f, 0x8f, 0x4b, 0x85, 0x1b, 0xdf, 0x78, 0xfe, 0xa2,
+ 0xbb, 0xf4, 0xd9, 0x8b, 0xee, 0xd2, 0xe7, 0x2f, 0xba, 0x4b, 0xbf, 0x8a, 0xba, 0xb5, 0xe7, 0x51,
+ 0xb7, 0xf6, 0x59, 0xd4, 0xad, 0x7d, 0x1e, 0x75, 0x6b, 0x7f, 0x8f, 0xba, 0xb5, 0xdf, 0xfe, 0xa3,
+ 0xbb, 0xf4, 0xf1, 0xcd, 0x92, 0xdf, 0x71, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x43, 0xdf, 0xa6,
+ 0x7c, 0xf6, 0x15, 0x00, 0x00,
}
func (m *CronJob) Marshal() (dAtA []byte, err error) {
@@ -1023,6 +1030,23 @@ func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.PodReplacementPolicy != nil {
+ i -= len(*m.PodReplacementPolicy)
+ copy(dAtA[i:], *m.PodReplacementPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PodReplacementPolicy)))
+ i--
+ dAtA[i] = 0x72
+ }
+ if m.MaxFailedIndexes != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxFailedIndexes))
+ i--
+ dAtA[i] = 0x68
+ }
+ if m.BackoffLimitPerIndex != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimitPerIndex))
+ i--
+ dAtA[i] = 0x60
+ }
if m.PodFailurePolicy != nil {
{
size, err := m.PodFailurePolicy.MarshalToSizedBuffer(dAtA[:i])
@@ -1132,6 +1156,18 @@ func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Terminating != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Terminating))
+ i--
+ dAtA[i] = 0x58
+ }
+ if m.FailedIndexes != nil {
+ i -= len(*m.FailedIndexes)
+ copy(dAtA[i:], *m.FailedIndexes)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailedIndexes)))
+ i--
+ dAtA[i] = 0x52
+ }
if m.Ready != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.Ready))
i--
@@ -1645,6 +1681,16 @@ func (m *JobSpec) Size() (n int) {
l = m.PodFailurePolicy.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.BackoffLimitPerIndex != nil {
+ n += 1 + sovGenerated(uint64(*m.BackoffLimitPerIndex))
+ }
+ if m.MaxFailedIndexes != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxFailedIndexes))
+ }
+ if m.PodReplacementPolicy != nil {
+ l = len(*m.PodReplacementPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -1680,6 +1726,13 @@ func (m *JobStatus) Size() (n int) {
if m.Ready != nil {
n += 1 + sovGenerated(uint64(*m.Ready))
}
+ if m.FailedIndexes != nil {
+ l = len(*m.FailedIndexes)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Terminating != nil {
+ n += 1 + sovGenerated(uint64(*m.Terminating))
+ }
return n
}
@@ -1913,6 +1966,9 @@ func (this *JobSpec) String() string {
`CompletionMode:` + valueToStringGenerated(this.CompletionMode) + `,`,
`Suspend:` + valueToStringGenerated(this.Suspend) + `,`,
`PodFailurePolicy:` + strings.Replace(this.PodFailurePolicy.String(), "PodFailurePolicy", "PodFailurePolicy", 1) + `,`,
+ `BackoffLimitPerIndex:` + valueToStringGenerated(this.BackoffLimitPerIndex) + `,`,
+ `MaxFailedIndexes:` + valueToStringGenerated(this.MaxFailedIndexes) + `,`,
+ `PodReplacementPolicy:` + valueToStringGenerated(this.PodReplacementPolicy) + `,`,
`}`,
}, "")
return s
@@ -1936,6 +1992,8 @@ func (this *JobStatus) String() string {
`CompletedIndexes:` + fmt.Sprintf("%v", this.CompletedIndexes) + `,`,
`UncountedTerminatedPods:` + strings.Replace(this.UncountedTerminatedPods.String(), "UncountedTerminatedPods", "UncountedTerminatedPods", 1) + `,`,
`Ready:` + valueToStringGenerated(this.Ready) + `,`,
+ `FailedIndexes:` + valueToStringGenerated(this.FailedIndexes) + `,`,
+ `Terminating:` + valueToStringGenerated(this.Terminating) + `,`,
`}`,
}, "")
return s
@@ -3527,6 +3585,79 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimitPerIndex", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.BackoffLimitPerIndex = &v
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxFailedIndexes", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxFailedIndexes = &v
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodReplacementPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := PodReplacementPolicy(dAtA[iNdEx:postIndex])
+ m.PodReplacementPolicy = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -3828,6 +3959,59 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error {
}
}
m.Ready = &v
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FailedIndexes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.FailedIndexes = &s
+ iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Terminating = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
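The new numeric fields above (BackoffLimitPerIndex, MaxFailedIndexes, Terminating) are read back with the standard protobuf varint loop that the generated Unmarshal code repeats for each case. A minimal sketch of that loop, outside the vendored code and with a hand-built example message (the real generated code additionally guards against overflow via ErrIntOverflowGenerated):

package main

import "fmt"

// decodeVarint mirrors the shift-accumulate loop in the generated code:
// seven payload bits per byte, least-significant group first, with the high
// bit of each byte marking "more bytes follow".
func decodeVarint(data []byte) (value uint64, n int) {
	var shift uint
	for n < len(data) {
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n
		}
		shift += 7
	}
	return 0, 0 // truncated input
}

func main() {
	// 0x60 is the key the marshaller writes for field 12 (backoffLimitPerIndex),
	// wire type 0: key = (12 << 3) | 0. The remaining bytes encode the value 300.
	msg := []byte{0x60, 0xAC, 0x02}
	key, n := decodeVarint(msg)
	val, _ := decodeVarint(msg[n:])
	fmt.Printf("field=%d wireType=%d value=%d\n", key>>3, key&0x7, val)
}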
diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
index a21b5f1b37..e1bef3a463 100644
--- a/vendor/k8s.io/api/batch/v1/generated.proto
+++ b/vendor/k8s.io/api/batch/v1/generated.proto
@@ -223,6 +223,30 @@ message JobSpec {
// +optional
optional int32 backoffLimit = 7;
+ // Specifies the limit for the number of retries within an
+ // index before marking this index as failed. When enabled the number of
+ // failures per index is kept in the pod's
+ // batch.kubernetes.io/job-index-failure-count annotation. It can only
+ // be set when Job's completionMode=Indexed, and the Pod's restart
+ // policy is Never. The field is immutable.
+ // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
+ // feature gate is enabled (disabled by default).
+ // +optional
+ optional int32 backoffLimitPerIndex = 12;
+
+ // Specifies the maximal number of failed indexes before marking the Job as
+ // failed, when backoffLimitPerIndex is set. Once the number of failed
+ // indexes exceeds this number the entire Job is marked as Failed and its
+ // execution is terminated. When left as null the job continues execution of
+ // all of its indexes and is marked with the `Complete` Job condition.
+ // It can only be specified when backoffLimitPerIndex is set.
+ // It can be null or up to completions. It is required and must be
+ // less than or equal to 10^4 when completions is greater than 10^5.
+ // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
+ // feature gate is enabled (disabled by default).
+ // +optional
+ optional int32 maxFailedIndexes = 13;
+
// A label query over pods that should match the pod count.
// Normally, the system sets this field for you.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
@@ -292,6 +316,19 @@ message JobSpec {
//
// +optional
optional bool suspend = 10;
+
+ // podReplacementPolicy specifies when to create replacement Pods.
+ // Possible values are:
+ // - TerminatingOrFailed means that we recreate pods
+ // when they are terminating (has a metadata.deletionTimestamp) or failed.
+ // - Failed means to wait until a previously created Pod is fully terminated (has phase
+ // Failed or Succeeded) before creating a replacement Pod.
+ //
+ // When using podFailurePolicy, Failed is the only allowed value.
+ // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
+ // This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.
+ // +optional
+ optional string podReplacementPolicy = 14;
}
// JobStatus represents the current state of a Job.
@@ -335,6 +372,14 @@ message JobStatus {
// +optional
optional int32 failed = 6;
+ // The number of pods which are terminating (in phase Pending or Running
+ // and have a deletionTimestamp).
+ //
+ // This field is alpha-level. The job controller populates the field when
+ // the feature gate JobPodReplacementPolicy is enabled (disabled by default).
+ // +optional
+ optional int32 terminating = 11;
+
// completedIndexes holds the completed indexes when .spec.completionMode =
// "Indexed" in a text format. The indexes are represented as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
@@ -345,6 +390,19 @@ message JobStatus {
// +optional
optional string completedIndexes = 7;
+ // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true.
+ // The indexes are represented in the text format analogous to the
+ // `completedIndexes` field, i.e. they are kept as decimal integers
+ // separated by commas. The numbers are listed in increasing order. Three or
+ // more consecutive numbers are compressed and represented by the first and
+ // last element of the series, separated by a hyphen.
+ // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
+ // represented as "1,3-5,7".
+ // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
+ // feature gate is enabled (disabled by default).
+ // +optional
+ optional string failedIndexes = 10;
+
// uncountedTerminatedPods holds the UIDs of Pods that have terminated but
// the job controller hasn't yet accounted for in the status counters.
//
@@ -452,6 +510,10 @@ message PodFailurePolicyRule {
//
// - FailJob: indicates that the pod's job is marked as Failed and all
// running pods are terminated.
+ // - FailIndex: indicates that the pod's index is marked as Failed and will
+ // not be restarted.
+ // This value is alpha-level. It can be used when the
+ // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).
// - Ignore: indicates that the counter towards the .backoffLimit is not
// incremented and a replacement pod is created.
// - Count: indicates that the pod is handled in the default way - the
@@ -468,7 +530,6 @@ message PodFailurePolicyRule {
// as a list of pod condition patterns. The requirement is satisfied if at
// least one pattern matches an actual pod condition. At most 20 elements are allowed.
// +listType=atomic
- // +optional
repeated PodFailurePolicyOnPodConditionsPattern onPodConditions = 3;
}
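For reference, a minimal sketch (not taken from this patch) of how the new alpha JobSpec fields documented above could be populated from client code. It assumes the JobBackoffLimitPerIndex and JobPodReplacementPolicy feature gates are enabled on the target cluster and uses only field and constant names introduced in this vendored version:

package main

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ptr[T any](v T) *T { return &v }

func exampleJob() *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "indexed-with-per-index-backoff"},
		Spec: batchv1.JobSpec{
			Completions:    ptr(int32(10)),
			Parallelism:    ptr(int32(5)),
			CompletionMode: ptr(batchv1.IndexedCompletion),
			// backoffLimitPerIndex requires completionMode=Indexed and
			// restartPolicy=Never; retries are then tracked per index.
			BackoffLimitPerIndex: ptr(int32(2)),
			// Mark the whole Job as Failed once more than 3 indexes have failed.
			MaxFailedIndexes: ptr(int32(3)),
			// Only create replacement Pods once a Pod is fully Failed.
			PodReplacementPolicy: ptr(batchv1.Failed),
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:  "worker",
						Image: "busybox",
					}},
				},
			},
		},
	}
}

func main() { _ = exampleJob() }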
diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
index c12e91b06c..883d193aae 100644
--- a/vendor/k8s.io/api/batch/v1/types.go
+++ b/vendor/k8s.io/api/batch/v1/types.go
@@ -27,6 +27,11 @@ const (
// More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#label-selector-and-annotation-conventions
labelPrefix = "batch.kubernetes.io/"
+ // CronJobScheduledTimestampAnnotation is the scheduled timestamp annotation for the Job.
+ // It records the original/expected scheduled timestamp for the running job, represented in RFC3339.
+ // The CronJob controller adds this annotation if the CronJobsScheduledAnnotation feature gate (beta in 1.28) is enabled.
+ CronJobScheduledTimestampAnnotation = labelPrefix + "cronjob-scheduled-timestamp"
+
JobCompletionIndexAnnotation = labelPrefix + "job-completion-index"
// JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
// being deleted before being accounted in the Job status.
@@ -45,6 +50,13 @@ const (
// ControllerUid is used to programmatically get pods corresponding to a Job.
// There is a corresponding label without the batch.kubernetes.io prefix that we support for legacy reasons.
ControllerUidLabel = labelPrefix + "controller-uid"
+ // Annotation indicating the number of failures for the index corresponding
+ // to the pod, which are counted towards the backoff limit.
+ JobIndexFailureCountAnnotation = labelPrefix + "job-index-failure-count"
+ // Annotation indicating the number of failures for the index corresponding
+ // to the pod, which don't count towards the backoff limit, according to the
+ // pod failure policy. When the annotation is absent zero is implied.
+ JobIndexIgnoredFailureCountAnnotation = labelPrefix + "job-index-ignored-failure-count"
)
// +genclient
@@ -109,6 +121,11 @@ const (
// pod's job as Failed and terminate all running pods.
PodFailurePolicyActionFailJob PodFailurePolicyAction = "FailJob"
+ // This is an action which might be taken on a pod failure - mark the
+ // Job's index as failed to avoid restarts within this index. This action
+ // can only be used when backoffLimitPerIndex is set.
+ PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex"
+
// This is an action which might be taken on a pod failure - the counter towards
// .backoffLimit, represented by the job's .status.failed field, is not
// incremented and a replacement pod is created.
@@ -128,6 +145,19 @@ const (
PodFailurePolicyOnExitCodesOpNotIn PodFailurePolicyOnExitCodesOperator = "NotIn"
)
+// PodReplacementPolicy specifies the policy for creating pod replacements.
+// +enum
+type PodReplacementPolicy string
+
+const (
+ // TerminatingOrFailed means that we recreate pods
+ // when they are terminating (has a metadata.deletionTimestamp) or failed.
+ TerminatingOrFailed PodReplacementPolicy = "TerminatingOrFailed"
+ // Failed means to wait until a previously created Pod is fully terminated (has phase
+ // Failed or Succeeded) before creating a replacement Pod.
+ Failed PodReplacementPolicy = "Failed"
+)
+
// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling
// a failed pod based on its container exit codes. In particular, it lookups the
// .state.terminated.exitCode for each app container and init container status,
@@ -186,6 +216,10 @@ type PodFailurePolicyRule struct {
//
// - FailJob: indicates that the pod's job is marked as Failed and all
// running pods are terminated.
+ // - FailIndex: indicates that the pod's index is marked as Failed and will
+ // not be restarted.
+ // This value is alpha-level. It can be used when the
+ // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).
// - Ignore: indicates that the counter towards the .backoffLimit is not
// incremented and a replacement pod is created.
// - Count: indicates that the pod is handled in the default way - the
@@ -202,7 +236,6 @@ type PodFailurePolicyRule struct {
// as a list of pod condition patterns. The requirement is satisfied if at
// least one pattern matches an actual pod condition. At most 20 elements are allowed.
// +listType=atomic
- // +optional
OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions" protobuf:"bytes,3,opt,name=onPodConditions"`
}
@@ -263,6 +296,30 @@ type JobSpec struct {
// +optional
BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"`
+ // Specifies the limit for the number of retries within an
+ // index before marking this index as failed. When enabled the number of
+ // failures per index is kept in the pod's
+ // batch.kubernetes.io/job-index-failure-count annotation. It can only
+ // be set when Job's completionMode=Indexed, and the Pod's restart
+ // policy is Never. The field is immutable.
+ // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
+ // feature gate is enabled (disabled by default).
+ // +optional
+ BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"`
+
+ // Specifies the maximal number of failed indexes before marking the Job as
+ // failed, when backoffLimitPerIndex is set. Once the number of failed
+ // indexes exceeds this number the entire Job is marked as Failed and its
+ // execution is terminated. When left as null the job continues execution of
+ // all of its indexes and is marked with the `Complete` Job condition.
+ // It can only be specified when backoffLimitPerIndex is set.
+ // It can be null or up to completions. It is required and must be
+ // less than or equal to 10^4 when completions is greater than 10^5.
+ // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
+ // feature gate is enabled (disabled by default).
+ // +optional
+ MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"`
+
// TODO enable it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed
// Optional number of failed pods to retain.
// +optional
@@ -337,6 +394,19 @@ type JobSpec struct {
//
// +optional
Suspend *bool `json:"suspend,omitempty" protobuf:"varint,10,opt,name=suspend"`
+
+ // podReplacementPolicy specifies when to create replacement Pods.
+ // Possible values are:
+ // - TerminatingOrFailed means that we recreate pods
+ // when they are terminating (has a metadata.deletionTimestamp) or failed.
+ // - Failed means to wait until a previously created Pod is fully terminated (has phase
+ // Failed or Succeeded) before creating a replacement Pod.
+ //
+ // When using podFailurePolicy, Failed is the only allowed value.
+ // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
+ // This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.
+ // +optional
+ PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"`
}
// JobStatus represents the current state of a Job.
@@ -380,6 +450,14 @@ type JobStatus struct {
// +optional
Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
+ // The number of pods which are terminating (in phase Pending or Running
+ // and have a deletionTimestamp).
+ //
+ // This field is alpha-level. The job controller populates the field when
+ // the feature gate JobPodReplacementPolicy is enabled (disabled by default).
+ // +optional
+ Terminating *int32 `json:"terminating,omitempty" protobuf:"varint,11,opt,name=terminating"`
+
// completedIndexes holds the completed indexes when .spec.completionMode =
// "Indexed" in a text format. The indexes are represented as decimal integers
// separated by commas. The numbers are listed in increasing order. Three or
@@ -390,6 +468,19 @@ type JobStatus struct {
// +optional
CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"`
+ // FailedIndexes holds the failed indexes when backoffLimitPerIndex=true.
+ // The indexes are represented in the text format analogous to the
+ // `completedIndexes` field, i.e. they are kept as decimal integers
+ // separated by commas. The numbers are listed in increasing order. Three or
+ // more consecutive numbers are compressed and represented by the first and
+ // last element of the series, separated by a hyphen.
+ // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
+ // represented as "1,3-5,7".
+ // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex`
+ // feature gate is enabled (disabled by default).
+ // +optional
+ FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"`
+
// uncountedTerminatedPods holds the UIDs of Pods that have terminated but
// the job controller hasn't yet accounted for in the status counters.
//
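The failedIndexes comment above documents the same compressed text format used by completedIndexes. A hypothetical helper (not part of the vendored package) that expands that format is sketched below:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expandIndexes turns "1,3-5,7" into [1 3 4 5 7], following the format
// described for the completedIndexes and failedIndexes fields above.
func expandIndexes(s string) ([]int, error) {
	if s == "" {
		return nil, nil
	}
	var out []int
	for _, part := range strings.Split(s, ",") {
		if first, last, ok := strings.Cut(part, "-"); ok {
			start, err := strconv.Atoi(first)
			if err != nil {
				return nil, err
			}
			end, err := strconv.Atoi(last)
			if err != nil {
				return nil, err
			}
			for i := start; i <= end; i++ {
				out = append(out, i)
			}
			continue
		}
		v, err := strconv.Atoi(part)
		if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, nil
}

func main() {
	idx, err := expandIndexes("1,3-5,7")
	fmt.Println(idx, err) // [1 3 4 5 7] <nil>
}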
diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
index f6f3141f18..43b4e1e7d9 100644
--- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -117,12 +117,15 @@ var map_JobSpec = map[string]string{
"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
"podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).",
"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
+ "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).",
+ "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).",
"selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
"manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
"template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.",
"completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
"suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
+ "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.",
}
func (JobSpec) SwaggerDoc() map[string]string {
@@ -137,7 +140,9 @@ var map_JobStatus = map[string]string{
"active": "The number of pending and running pods.",
"succeeded": "The number of pods which reached phase Succeeded.",
"failed": "The number of pods which reached phase Failed.",
+ "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default).",
"completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".",
+ "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).",
"uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.",
"ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).",
}
@@ -188,7 +193,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string {
var map_PodFailurePolicyRule = map[string]string{
"": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.",
- "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.",
+ "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.",
"onExitCodes": "Represents the requirement on the container exit codes.",
"onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.",
}
diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
index 2a901e9d0f..43fc41515b 100644
--- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
@@ -267,6 +267,16 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) {
*out = new(int32)
**out = **in
}
+ if in.BackoffLimitPerIndex != nil {
+ in, out := &in.BackoffLimitPerIndex, &out.BackoffLimitPerIndex
+ *out = new(int32)
+ **out = **in
+ }
+ if in.MaxFailedIndexes != nil {
+ in, out := &in.MaxFailedIndexes, &out.MaxFailedIndexes
+ *out = new(int32)
+ **out = **in
+ }
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
@@ -293,6 +303,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) {
*out = new(bool)
**out = **in
}
+ if in.PodReplacementPolicy != nil {
+ in, out := &in.PodReplacementPolicy, &out.PodReplacementPolicy
+ *out = new(PodReplacementPolicy)
+ **out = **in
+ }
return
}
@@ -324,6 +339,16 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
+ if in.Terminating != nil {
+ in, out := &in.Terminating, &out.Terminating
+ *out = new(int32)
+ **out = **in
+ }
+ if in.FailedIndexes != nil {
+ in, out := &in.FailedIndexes, &out.FailedIndexes
+ *out = new(string)
+ **out = **in
+ }
if in.UncountedTerminatedPods != nil {
in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods
*out = new(UncountedTerminatedPods)
diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
index 61f86f850a..106ba14c3d 100644
--- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go
+++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
@@ -56,9 +56,9 @@ const (
// AppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile.
AppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
- // AppArmorBetaDefaultProfileAnnotatoinKey is the annotation key specifying the default AppArmor profile.
+ // AppArmorBetaDefaultProfileAnnotationKey is the annotation key specifying the default AppArmor profile.
AppArmorBetaDefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName"
- // AppArmorBetaAllowedProfileAnnotationKey is the annotation key specifying the allowed AppArmor profiles.
+ // AppArmorBetaAllowedProfilesAnnotationKey is the annotation key specifying the allowed AppArmor profiles.
AppArmorBetaAllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames"
// AppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default.
@@ -78,7 +78,7 @@ const (
// in the Annotations of a Node.
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
- // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache
+ // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache
// an object (e.g. secret, config map) before fetching it again from apiserver.
// This annotation can be attached to node.
ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index c766462960..c267a5febd 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -1981,10 +1981,38 @@ func (m *HostAlias) XXX_DiscardUnknown() {
var xxx_messageInfo_HostAlias proto.InternalMessageInfo
+func (m *HostIP) Reset() { *m = HostIP{} }
+func (*HostIP) ProtoMessage() {}
+func (*HostIP) Descriptor() ([]byte, []int) {
+ return fileDescriptor_83c10c24ec417dc9, []int{69}
+}
+func (m *HostIP) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HostIP) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HostIP.Merge(m, src)
+}
+func (m *HostIP) XXX_Size() int {
+ return m.Size()
+}
+func (m *HostIP) XXX_DiscardUnknown() {
+ xxx_messageInfo_HostIP.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HostIP proto.InternalMessageInfo
+
func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} }
func (*HostPathVolumeSource) ProtoMessage() {}
func (*HostPathVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{69}
+ return fileDescriptor_83c10c24ec417dc9, []int{70}
}
func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2012,7 +2040,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo
func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} }
func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{70}
+ return fileDescriptor_83c10c24ec417dc9, []int{71}
}
func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2040,7 +2068,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo
func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} }
func (*ISCSIVolumeSource) ProtoMessage() {}
func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{71}
+ return fileDescriptor_83c10c24ec417dc9, []int{72}
}
func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2068,7 +2096,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
func (m *KeyToPath) Reset() { *m = KeyToPath{} }
func (*KeyToPath) ProtoMessage() {}
func (*KeyToPath) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{72}
+ return fileDescriptor_83c10c24ec417dc9, []int{73}
}
func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2096,7 +2124,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
func (m *Lifecycle) Reset() { *m = Lifecycle{} }
func (*Lifecycle) ProtoMessage() {}
func (*Lifecycle) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{73}
+ return fileDescriptor_83c10c24ec417dc9, []int{74}
}
func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2124,7 +2152,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} }
func (*LifecycleHandler) ProtoMessage() {}
func (*LifecycleHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{74}
+ return fileDescriptor_83c10c24ec417dc9, []int{75}
}
func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2152,7 +2180,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo
func (m *LimitRange) Reset() { *m = LimitRange{} }
func (*LimitRange) ProtoMessage() {}
func (*LimitRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{75}
+ return fileDescriptor_83c10c24ec417dc9, []int{76}
}
func (m *LimitRange) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2180,7 +2208,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo
func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} }
func (*LimitRangeItem) ProtoMessage() {}
func (*LimitRangeItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{76}
+ return fileDescriptor_83c10c24ec417dc9, []int{77}
}
func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2208,7 +2236,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
func (m *LimitRangeList) Reset() { *m = LimitRangeList{} }
func (*LimitRangeList) ProtoMessage() {}
func (*LimitRangeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{77}
+ return fileDescriptor_83c10c24ec417dc9, []int{78}
}
func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2236,7 +2264,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} }
func (*LimitRangeSpec) ProtoMessage() {}
func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{78}
+ return fileDescriptor_83c10c24ec417dc9, []int{79}
}
func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2264,7 +2292,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
func (m *List) Reset() { *m = List{} }
func (*List) ProtoMessage() {}
func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{79}
+ return fileDescriptor_83c10c24ec417dc9, []int{80}
}
func (m *List) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2292,7 +2320,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
func (*LoadBalancerIngress) ProtoMessage() {}
func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{80}
+ return fileDescriptor_83c10c24ec417dc9, []int{81}
}
func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2320,7 +2348,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
func (*LoadBalancerStatus) ProtoMessage() {}
func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{81}
+ return fileDescriptor_83c10c24ec417dc9, []int{82}
}
func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2348,7 +2376,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
func (*LocalObjectReference) ProtoMessage() {}
func (*LocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{82}
+ return fileDescriptor_83c10c24ec417dc9, []int{83}
}
func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2376,7 +2404,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} }
func (*LocalVolumeSource) ProtoMessage() {}
func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{83}
+ return fileDescriptor_83c10c24ec417dc9, []int{84}
}
func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2404,7 +2432,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
func (*NFSVolumeSource) ProtoMessage() {}
func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{84}
+ return fileDescriptor_83c10c24ec417dc9, []int{85}
}
func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2432,7 +2460,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
func (m *Namespace) Reset() { *m = Namespace{} }
func (*Namespace) ProtoMessage() {}
func (*Namespace) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{85}
+ return fileDescriptor_83c10c24ec417dc9, []int{86}
}
func (m *Namespace) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2460,7 +2488,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo
func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} }
func (*NamespaceCondition) ProtoMessage() {}
func (*NamespaceCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{86}
+ return fileDescriptor_83c10c24ec417dc9, []int{87}
}
func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2488,7 +2516,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
func (m *NamespaceList) Reset() { *m = NamespaceList{} }
func (*NamespaceList) ProtoMessage() {}
func (*NamespaceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{87}
+ return fileDescriptor_83c10c24ec417dc9, []int{88}
}
func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2516,7 +2544,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} }
func (*NamespaceSpec) ProtoMessage() {}
func (*NamespaceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{88}
+ return fileDescriptor_83c10c24ec417dc9, []int{89}
}
func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2544,7 +2572,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} }
func (*NamespaceStatus) ProtoMessage() {}
func (*NamespaceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{89}
+ return fileDescriptor_83c10c24ec417dc9, []int{90}
}
func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2572,7 +2600,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
func (m *Node) Reset() { *m = Node{} }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{90}
+ return fileDescriptor_83c10c24ec417dc9, []int{91}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2600,7 +2628,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo
func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{91}
+ return fileDescriptor_83c10c24ec417dc9, []int{92}
}
func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2628,7 +2656,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
func (m *NodeAffinity) Reset() { *m = NodeAffinity{} }
func (*NodeAffinity) ProtoMessage() {}
func (*NodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{92}
+ return fileDescriptor_83c10c24ec417dc9, []int{93}
}
func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2656,7 +2684,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
func (m *NodeCondition) Reset() { *m = NodeCondition{} }
func (*NodeCondition) ProtoMessage() {}
func (*NodeCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{93}
+ return fileDescriptor_83c10c24ec417dc9, []int{94}
}
func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2684,7 +2712,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} }
func (*NodeConfigSource) ProtoMessage() {}
func (*NodeConfigSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{94}
+ return fileDescriptor_83c10c24ec417dc9, []int{95}
}
func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2712,7 +2740,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} }
func (*NodeConfigStatus) ProtoMessage() {}
func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{95}
+ return fileDescriptor_83c10c24ec417dc9, []int{96}
}
func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2740,7 +2768,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} }
func (*NodeDaemonEndpoints) ProtoMessage() {}
func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{96}
+ return fileDescriptor_83c10c24ec417dc9, []int{97}
}
func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2768,7 +2796,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
func (m *NodeList) Reset() { *m = NodeList{} }
func (*NodeList) ProtoMessage() {}
func (*NodeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{97}
+ return fileDescriptor_83c10c24ec417dc9, []int{98}
}
func (m *NodeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2796,7 +2824,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo
func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} }
func (*NodeProxyOptions) ProtoMessage() {}
func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{98}
+ return fileDescriptor_83c10c24ec417dc9, []int{99}
}
func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2824,7 +2852,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
func (m *NodeResources) Reset() { *m = NodeResources{} }
func (*NodeResources) ProtoMessage() {}
func (*NodeResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{99}
+ return fileDescriptor_83c10c24ec417dc9, []int{100}
}
func (m *NodeResources) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2852,7 +2880,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo
func (m *NodeSelector) Reset() { *m = NodeSelector{} }
func (*NodeSelector) ProtoMessage() {}
func (*NodeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{100}
+ return fileDescriptor_83c10c24ec417dc9, []int{101}
}
func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2880,7 +2908,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} }
func (*NodeSelectorRequirement) ProtoMessage() {}
func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{101}
+ return fileDescriptor_83c10c24ec417dc9, []int{102}
}
func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2908,7 +2936,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} }
func (*NodeSelectorTerm) ProtoMessage() {}
func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{102}
+ return fileDescriptor_83c10c24ec417dc9, []int{103}
}
func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2936,7 +2964,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
func (m *NodeSpec) Reset() { *m = NodeSpec{} }
func (*NodeSpec) ProtoMessage() {}
func (*NodeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{103}
+ return fileDescriptor_83c10c24ec417dc9, []int{104}
}
func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2964,7 +2992,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
func (m *NodeStatus) Reset() { *m = NodeStatus{} }
func (*NodeStatus) ProtoMessage() {}
func (*NodeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{104}
+ return fileDescriptor_83c10c24ec417dc9, []int{105}
}
func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2992,7 +3020,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
func (*NodeSystemInfo) ProtoMessage() {}
func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{105}
+ return fileDescriptor_83c10c24ec417dc9, []int{106}
}
func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3020,7 +3048,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
func (*ObjectFieldSelector) ProtoMessage() {}
func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{106}
+ return fileDescriptor_83c10c24ec417dc9, []int{107}
}
func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3048,7 +3076,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
func (m *ObjectReference) Reset() { *m = ObjectReference{} }
func (*ObjectReference) ProtoMessage() {}
func (*ObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{107}
+ return fileDescriptor_83c10c24ec417dc9, []int{108}
}
func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3076,7 +3104,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
func (*PersistentVolume) ProtoMessage() {}
func (*PersistentVolume) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{108}
+ return fileDescriptor_83c10c24ec417dc9, []int{109}
}
func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3104,7 +3132,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
func (*PersistentVolumeClaim) ProtoMessage() {}
func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{109}
+ return fileDescriptor_83c10c24ec417dc9, []int{110}
}
func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3132,7 +3160,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} }
func (*PersistentVolumeClaimCondition) ProtoMessage() {}
func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{110}
+ return fileDescriptor_83c10c24ec417dc9, []int{111}
}
func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3160,7 +3188,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
func (*PersistentVolumeClaimList) ProtoMessage() {}
func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{111}
+ return fileDescriptor_83c10c24ec417dc9, []int{112}
}
func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3188,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
func (*PersistentVolumeClaimSpec) ProtoMessage() {}
func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{112}
+ return fileDescriptor_83c10c24ec417dc9, []int{113}
}
func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3216,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
func (*PersistentVolumeClaimStatus) ProtoMessage() {}
func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{113}
+ return fileDescriptor_83c10c24ec417dc9, []int{114}
}
func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3244,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} }
func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{114}
+ return fileDescriptor_83c10c24ec417dc9, []int{115}
}
func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3272,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{115}
+ return fileDescriptor_83c10c24ec417dc9, []int{116}
}
func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3300,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
func (*PersistentVolumeList) ProtoMessage() {}
func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{116}
+ return fileDescriptor_83c10c24ec417dc9, []int{117}
}
func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3328,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
func (*PersistentVolumeSource) ProtoMessage() {}
func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{117}
+ return fileDescriptor_83c10c24ec417dc9, []int{118}
}
func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3356,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
func (*PersistentVolumeSpec) ProtoMessage() {}
func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{118}
+ return fileDescriptor_83c10c24ec417dc9, []int{119}
}
func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3384,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
func (*PersistentVolumeStatus) ProtoMessage() {}
func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{119}
+ return fileDescriptor_83c10c24ec417dc9, []int{120}
}
func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3412,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} }
func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{120}
+ return fileDescriptor_83c10c24ec417dc9, []int{121}
}
func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3440,7 +3468,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
func (m *Pod) Reset() { *m = Pod{} }
func (*Pod) ProtoMessage() {}
func (*Pod) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{121}
+ return fileDescriptor_83c10c24ec417dc9, []int{122}
}
func (m *Pod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3468,7 +3496,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo
func (m *PodAffinity) Reset() { *m = PodAffinity{} }
func (*PodAffinity) ProtoMessage() {}
func (*PodAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{122}
+ return fileDescriptor_83c10c24ec417dc9, []int{123}
}
func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3496,7 +3524,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
func (*PodAffinityTerm) ProtoMessage() {}
func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{123}
+ return fileDescriptor_83c10c24ec417dc9, []int{124}
}
func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3524,7 +3552,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
func (*PodAntiAffinity) ProtoMessage() {}
func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{124}
+ return fileDescriptor_83c10c24ec417dc9, []int{125}
}
func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3552,7 +3580,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
func (*PodAttachOptions) ProtoMessage() {}
func (*PodAttachOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{125}
+ return fileDescriptor_83c10c24ec417dc9, []int{126}
}
func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3580,7 +3608,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
func (m *PodCondition) Reset() { *m = PodCondition{} }
func (*PodCondition) ProtoMessage() {}
func (*PodCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{126}
+ return fileDescriptor_83c10c24ec417dc9, []int{127}
}
func (m *PodCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3608,7 +3636,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
func (*PodDNSConfig) ProtoMessage() {}
func (*PodDNSConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{127}
+ return fileDescriptor_83c10c24ec417dc9, []int{128}
}
func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3636,7 +3664,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
func (*PodDNSConfigOption) ProtoMessage() {}
func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{128}
+ return fileDescriptor_83c10c24ec417dc9, []int{129}
}
func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3664,7 +3692,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
func (*PodExecOptions) ProtoMessage() {}
func (*PodExecOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{129}
+ return fileDescriptor_83c10c24ec417dc9, []int{130}
}
func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3692,7 +3720,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
func (m *PodIP) Reset() { *m = PodIP{} }
func (*PodIP) ProtoMessage() {}
func (*PodIP) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{130}
+ return fileDescriptor_83c10c24ec417dc9, []int{131}
}
func (m *PodIP) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3720,7 +3748,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
func (m *PodList) Reset() { *m = PodList{} }
func (*PodList) ProtoMessage() {}
func (*PodList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{131}
+ return fileDescriptor_83c10c24ec417dc9, []int{132}
}
func (m *PodList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3748,7 +3776,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
func (*PodLogOptions) ProtoMessage() {}
func (*PodLogOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{132}
+ return fileDescriptor_83c10c24ec417dc9, []int{133}
}
func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3776,7 +3804,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
func (m *PodOS) Reset() { *m = PodOS{} }
func (*PodOS) ProtoMessage() {}
func (*PodOS) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{133}
+ return fileDescriptor_83c10c24ec417dc9, []int{134}
}
func (m *PodOS) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3804,7 +3832,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo
func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
func (*PodPortForwardOptions) ProtoMessage() {}
func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{134}
+ return fileDescriptor_83c10c24ec417dc9, []int{135}
}
func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3832,7 +3860,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
func (*PodProxyOptions) ProtoMessage() {}
func (*PodProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{135}
+ return fileDescriptor_83c10c24ec417dc9, []int{136}
}
func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3860,7 +3888,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
func (*PodReadinessGate) ProtoMessage() {}
func (*PodReadinessGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{136}
+ return fileDescriptor_83c10c24ec417dc9, []int{137}
}
func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3888,7 +3916,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} }
func (*PodResourceClaim) ProtoMessage() {}
func (*PodResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{137}
+ return fileDescriptor_83c10c24ec417dc9, []int{138}
}
func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3913,10 +3941,38 @@ func (m *PodResourceClaim) XXX_DiscardUnknown() {
var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
+func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} }
+func (*PodResourceClaimStatus) ProtoMessage() {}
+func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_83c10c24ec417dc9, []int{139}
+}
+func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PodResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PodResourceClaimStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PodResourceClaimStatus.Merge(m, src)
+}
+func (m *PodResourceClaimStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *PodResourceClaimStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_PodResourceClaimStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
+
func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} }
func (*PodSchedulingGate) ProtoMessage() {}
func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{138}
+ return fileDescriptor_83c10c24ec417dc9, []int{140}
}
func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3944,7 +4000,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
func (*PodSecurityContext) ProtoMessage() {}
func (*PodSecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{139}
+ return fileDescriptor_83c10c24ec417dc9, []int{141}
}
func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3972,7 +4028,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
func (m *PodSignature) Reset() { *m = PodSignature{} }
func (*PodSignature) ProtoMessage() {}
func (*PodSignature) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{140}
+ return fileDescriptor_83c10c24ec417dc9, []int{142}
}
func (m *PodSignature) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4000,7 +4056,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
func (m *PodSpec) Reset() { *m = PodSpec{} }
func (*PodSpec) ProtoMessage() {}
func (*PodSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{141}
+ return fileDescriptor_83c10c24ec417dc9, []int{143}
}
func (m *PodSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4028,7 +4084,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
func (m *PodStatus) Reset() { *m = PodStatus{} }
func (*PodStatus) ProtoMessage() {}
func (*PodStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{142}
+ return fileDescriptor_83c10c24ec417dc9, []int{144}
}
func (m *PodStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4056,7 +4112,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
func (*PodStatusResult) ProtoMessage() {}
func (*PodStatusResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{143}
+ return fileDescriptor_83c10c24ec417dc9, []int{145}
}
func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4084,7 +4140,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
func (m *PodTemplate) Reset() { *m = PodTemplate{} }
func (*PodTemplate) ProtoMessage() {}
func (*PodTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{144}
+ return fileDescriptor_83c10c24ec417dc9, []int{146}
}
func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4112,7 +4168,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
func (*PodTemplateList) ProtoMessage() {}
func (*PodTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{145}
+ return fileDescriptor_83c10c24ec417dc9, []int{147}
}
func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4140,7 +4196,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
func (*PodTemplateSpec) ProtoMessage() {}
func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{146}
+ return fileDescriptor_83c10c24ec417dc9, []int{148}
}
func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4168,7 +4224,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
func (m *PortStatus) Reset() { *m = PortStatus{} }
func (*PortStatus) ProtoMessage() {}
func (*PortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{147}
+ return fileDescriptor_83c10c24ec417dc9, []int{149}
}
func (m *PortStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4196,7 +4252,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
func (*PortworxVolumeSource) ProtoMessage() {}
func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{148}
+ return fileDescriptor_83c10c24ec417dc9, []int{150}
}
func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4224,7 +4280,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
func (m *Preconditions) Reset() { *m = Preconditions{} }
func (*Preconditions) ProtoMessage() {}
func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{149}
+ return fileDescriptor_83c10c24ec417dc9, []int{151}
}
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4252,7 +4308,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
func (*PreferAvoidPodsEntry) ProtoMessage() {}
func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{150}
+ return fileDescriptor_83c10c24ec417dc9, []int{152}
}
func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4280,7 +4336,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
func (*PreferredSchedulingTerm) ProtoMessage() {}
func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{151}
+ return fileDescriptor_83c10c24ec417dc9, []int{153}
}
func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4308,7 +4364,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
func (m *Probe) Reset() { *m = Probe{} }
func (*Probe) ProtoMessage() {}
func (*Probe) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{152}
+ return fileDescriptor_83c10c24ec417dc9, []int{154}
}
func (m *Probe) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4336,7 +4392,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
func (m *ProbeHandler) Reset() { *m = ProbeHandler{} }
func (*ProbeHandler) ProtoMessage() {}
func (*ProbeHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{153}
+ return fileDescriptor_83c10c24ec417dc9, []int{155}
}
func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4364,7 +4420,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
func (*ProjectedVolumeSource) ProtoMessage() {}
func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{154}
+ return fileDescriptor_83c10c24ec417dc9, []int{156}
}
func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4392,7 +4448,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
func (*QuobyteVolumeSource) ProtoMessage() {}
func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{155}
+ return fileDescriptor_83c10c24ec417dc9, []int{157}
}
func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4420,7 +4476,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
func (*RBDPersistentVolumeSource) ProtoMessage() {}
func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{156}
+ return fileDescriptor_83c10c24ec417dc9, []int{158}
}
func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4448,7 +4504,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
func (*RBDVolumeSource) ProtoMessage() {}
func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{157}
+ return fileDescriptor_83c10c24ec417dc9, []int{159}
}
func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4476,7 +4532,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
func (*RangeAllocation) ProtoMessage() {}
func (*RangeAllocation) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{158}
+ return fileDescriptor_83c10c24ec417dc9, []int{160}
}
func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4504,7 +4560,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
func (m *ReplicationController) Reset() { *m = ReplicationController{} }
func (*ReplicationController) ProtoMessage() {}
func (*ReplicationController) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{159}
+ return fileDescriptor_83c10c24ec417dc9, []int{161}
}
func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4532,7 +4588,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
func (*ReplicationControllerCondition) ProtoMessage() {}
func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{160}
+ return fileDescriptor_83c10c24ec417dc9, []int{162}
}
func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4560,7 +4616,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
func (*ReplicationControllerList) ProtoMessage() {}
func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{161}
+ return fileDescriptor_83c10c24ec417dc9, []int{163}
}
func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4588,7 +4644,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
func (*ReplicationControllerSpec) ProtoMessage() {}
func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{162}
+ return fileDescriptor_83c10c24ec417dc9, []int{164}
}
func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4616,7 +4672,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
func (*ReplicationControllerStatus) ProtoMessage() {}
func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{163}
+ return fileDescriptor_83c10c24ec417dc9, []int{165}
}
func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4644,7 +4700,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
func (*ResourceClaim) ProtoMessage() {}
func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{164}
+ return fileDescriptor_83c10c24ec417dc9, []int{166}
}
func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4672,7 +4728,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
func (*ResourceFieldSelector) ProtoMessage() {}
func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{165}
+ return fileDescriptor_83c10c24ec417dc9, []int{167}
}
func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4700,7 +4756,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
func (*ResourceQuota) ProtoMessage() {}
func (*ResourceQuota) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{166}
+ return fileDescriptor_83c10c24ec417dc9, []int{168}
}
func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4728,7 +4784,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
func (*ResourceQuotaList) ProtoMessage() {}
func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{167}
+ return fileDescriptor_83c10c24ec417dc9, []int{169}
}
func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4756,7 +4812,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
func (*ResourceQuotaSpec) ProtoMessage() {}
func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{168}
+ return fileDescriptor_83c10c24ec417dc9, []int{170}
}
func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4784,7 +4840,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
func (*ResourceQuotaStatus) ProtoMessage() {}
func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{169}
+ return fileDescriptor_83c10c24ec417dc9, []int{171}
}
func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4812,7 +4868,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
func (*ResourceRequirements) ProtoMessage() {}
func (*ResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{170}
+ return fileDescriptor_83c10c24ec417dc9, []int{172}
}
func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4840,7 +4896,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
func (*SELinuxOptions) ProtoMessage() {}
func (*SELinuxOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{171}
+ return fileDescriptor_83c10c24ec417dc9, []int{173}
}
func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4868,7 +4924,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{172}
+ return fileDescriptor_83c10c24ec417dc9, []int{174}
}
func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4896,7 +4952,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
func (*ScaleIOVolumeSource) ProtoMessage() {}
func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{173}
+ return fileDescriptor_83c10c24ec417dc9, []int{175}
}
func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4924,7 +4980,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
func (*ScopeSelector) ProtoMessage() {}
func (*ScopeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{174}
+ return fileDescriptor_83c10c24ec417dc9, []int{176}
}
func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4952,7 +5008,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{175}
+ return fileDescriptor_83c10c24ec417dc9, []int{177}
}
func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4980,7 +5036,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
func (*SeccompProfile) ProtoMessage() {}
func (*SeccompProfile) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{176}
+ return fileDescriptor_83c10c24ec417dc9, []int{178}
}
func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5008,7 +5064,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
func (m *Secret) Reset() { *m = Secret{} }
func (*Secret) ProtoMessage() {}
func (*Secret) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{177}
+ return fileDescriptor_83c10c24ec417dc9, []int{179}
}
func (m *Secret) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5036,7 +5092,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
func (*SecretEnvSource) ProtoMessage() {}
func (*SecretEnvSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{178}
+ return fileDescriptor_83c10c24ec417dc9, []int{180}
}
func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5064,7 +5120,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
func (*SecretKeySelector) ProtoMessage() {}
func (*SecretKeySelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{179}
+ return fileDescriptor_83c10c24ec417dc9, []int{181}
}
func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5092,7 +5148,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
func (m *SecretList) Reset() { *m = SecretList{} }
func (*SecretList) ProtoMessage() {}
func (*SecretList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{180}
+ return fileDescriptor_83c10c24ec417dc9, []int{182}
}
func (m *SecretList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5120,7 +5176,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
func (m *SecretProjection) Reset() { *m = SecretProjection{} }
func (*SecretProjection) ProtoMessage() {}
func (*SecretProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{181}
+ return fileDescriptor_83c10c24ec417dc9, []int{183}
}
func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5148,7 +5204,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
func (m *SecretReference) Reset() { *m = SecretReference{} }
func (*SecretReference) ProtoMessage() {}
func (*SecretReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{182}
+ return fileDescriptor_83c10c24ec417dc9, []int{184}
}
func (m *SecretReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5176,7 +5232,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
func (*SecretVolumeSource) ProtoMessage() {}
func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{183}
+ return fileDescriptor_83c10c24ec417dc9, []int{185}
}
func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5204,7 +5260,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
func (m *SecurityContext) Reset() { *m = SecurityContext{} }
func (*SecurityContext) ProtoMessage() {}
func (*SecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{184}
+ return fileDescriptor_83c10c24ec417dc9, []int{186}
}
func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5232,7 +5288,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
func (m *SerializedReference) Reset() { *m = SerializedReference{} }
func (*SerializedReference) ProtoMessage() {}
func (*SerializedReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{185}
+ return fileDescriptor_83c10c24ec417dc9, []int{187}
}
func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5260,7 +5316,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
func (m *Service) Reset() { *m = Service{} }
func (*Service) ProtoMessage() {}
func (*Service) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{186}
+ return fileDescriptor_83c10c24ec417dc9, []int{188}
}
func (m *Service) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5288,7 +5344,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
func (*ServiceAccount) ProtoMessage() {}
func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{187}
+ return fileDescriptor_83c10c24ec417dc9, []int{189}
}
func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5316,7 +5372,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
func (*ServiceAccountList) ProtoMessage() {}
func (*ServiceAccountList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{188}
+ return fileDescriptor_83c10c24ec417dc9, []int{190}
}
func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5344,7 +5400,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
func (*ServiceAccountTokenProjection) ProtoMessage() {}
func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{189}
+ return fileDescriptor_83c10c24ec417dc9, []int{191}
}
func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5372,7 +5428,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
func (m *ServiceList) Reset() { *m = ServiceList{} }
func (*ServiceList) ProtoMessage() {}
func (*ServiceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{190}
+ return fileDescriptor_83c10c24ec417dc9, []int{192}
}
func (m *ServiceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5400,7 +5456,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
func (m *ServicePort) Reset() { *m = ServicePort{} }
func (*ServicePort) ProtoMessage() {}
func (*ServicePort) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{191}
+ return fileDescriptor_83c10c24ec417dc9, []int{193}
}
func (m *ServicePort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5428,7 +5484,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
func (*ServiceProxyOptions) ProtoMessage() {}
func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{192}
+ return fileDescriptor_83c10c24ec417dc9, []int{194}
}
func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5456,7 +5512,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
func (*ServiceSpec) ProtoMessage() {}
func (*ServiceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{193}
+ return fileDescriptor_83c10c24ec417dc9, []int{195}
}
func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5484,7 +5540,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
func (*ServiceStatus) ProtoMessage() {}
func (*ServiceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{194}
+ return fileDescriptor_83c10c24ec417dc9, []int{196}
}
func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5512,7 +5568,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
func (*SessionAffinityConfig) ProtoMessage() {}
func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{195}
+ return fileDescriptor_83c10c24ec417dc9, []int{197}
}
func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5540,7 +5596,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{196}
+ return fileDescriptor_83c10c24ec417dc9, []int{198}
}
func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5568,7 +5624,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
func (*StorageOSVolumeSource) ProtoMessage() {}
func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{197}
+ return fileDescriptor_83c10c24ec417dc9, []int{199}
}
func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5596,7 +5652,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
func (m *Sysctl) Reset() { *m = Sysctl{} }
func (*Sysctl) ProtoMessage() {}
func (*Sysctl) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{198}
+ return fileDescriptor_83c10c24ec417dc9, []int{200}
}
func (m *Sysctl) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5624,7 +5680,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
func (*TCPSocketAction) ProtoMessage() {}
func (*TCPSocketAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{199}
+ return fileDescriptor_83c10c24ec417dc9, []int{201}
}
func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5652,7 +5708,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
func (m *Taint) Reset() { *m = Taint{} }
func (*Taint) ProtoMessage() {}
func (*Taint) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{200}
+ return fileDescriptor_83c10c24ec417dc9, []int{202}
}
func (m *Taint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5680,7 +5736,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
func (m *Toleration) Reset() { *m = Toleration{} }
func (*Toleration) ProtoMessage() {}
func (*Toleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{201}
+ return fileDescriptor_83c10c24ec417dc9, []int{203}
}
func (m *Toleration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5708,7 +5764,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
func (*TopologySelectorLabelRequirement) ProtoMessage() {}
func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{202}
+ return fileDescriptor_83c10c24ec417dc9, []int{204}
}
func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5736,7 +5792,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
func (*TopologySelectorTerm) ProtoMessage() {}
func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{203}
+ return fileDescriptor_83c10c24ec417dc9, []int{205}
}
func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5764,7 +5820,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
func (*TopologySpreadConstraint) ProtoMessage() {}
func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{204}
+ return fileDescriptor_83c10c24ec417dc9, []int{206}
}
func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5792,7 +5848,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
func (*TypedLocalObjectReference) ProtoMessage() {}
func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{205}
+ return fileDescriptor_83c10c24ec417dc9, []int{207}
}
func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5820,7 +5876,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
func (*TypedObjectReference) ProtoMessage() {}
func (*TypedObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{206}
+ return fileDescriptor_83c10c24ec417dc9, []int{208}
}
func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5848,7 +5904,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
func (m *Volume) Reset() { *m = Volume{} }
func (*Volume) ProtoMessage() {}
func (*Volume) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{207}
+ return fileDescriptor_83c10c24ec417dc9, []int{209}
}
func (m *Volume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5876,7 +5932,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
func (*VolumeDevice) ProtoMessage() {}
func (*VolumeDevice) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{208}
+ return fileDescriptor_83c10c24ec417dc9, []int{210}
}
func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5904,7 +5960,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
func (m *VolumeMount) Reset() { *m = VolumeMount{} }
func (*VolumeMount) ProtoMessage() {}
func (*VolumeMount) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{209}
+ return fileDescriptor_83c10c24ec417dc9, []int{211}
}
func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5932,7 +5988,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
func (*VolumeNodeAffinity) ProtoMessage() {}
func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{210}
+ return fileDescriptor_83c10c24ec417dc9, []int{212}
}
func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5960,7 +6016,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
func (*VolumeProjection) ProtoMessage() {}
func (*VolumeProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{211}
+ return fileDescriptor_83c10c24ec417dc9, []int{213}
}
func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5988,7 +6044,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
func (m *VolumeSource) Reset() { *m = VolumeSource{} }
func (*VolumeSource) ProtoMessage() {}
func (*VolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{212}
+ return fileDescriptor_83c10c24ec417dc9, []int{214}
}
func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6016,7 +6072,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{213}
+ return fileDescriptor_83c10c24ec417dc9, []int{215}
}
func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6044,7 +6100,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
func (*WeightedPodAffinityTerm) ProtoMessage() {}
func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{214}
+ return fileDescriptor_83c10c24ec417dc9, []int{216}
}
func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6072,7 +6128,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
func (*WindowsSecurityContextOptions) ProtoMessage() {}
func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{215}
+ return fileDescriptor_83c10c24ec417dc9, []int{217}
}
func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -6174,6 +6230,7 @@ func init() {
proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction")
proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader")
proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias")
+ proto.RegisterType((*HostIP)(nil), "k8s.io.api.core.v1.HostIP")
proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource")
proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource")
proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource")
@@ -6227,6 +6284,7 @@ func init() {
proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList")
proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec")
proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus")
+ proto.RegisterMapType((map[ResourceName]ClaimResourceStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourceStatusesEntry")
proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourcesEntry")
proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry")
proto.RegisterType((*PersistentVolumeClaimTemplate)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimTemplate")
@@ -6254,6 +6312,7 @@ func init() {
proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions")
proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate")
proto.RegisterType((*PodResourceClaim)(nil), "k8s.io.api.core.v1.PodResourceClaim")
+ proto.RegisterType((*PodResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodResourceClaimStatus")
proto.RegisterType((*PodSchedulingGate)(nil), "k8s.io.api.core.v1.PodSchedulingGate")
proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext")
proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature")
@@ -6350,925 +6409,934 @@ func init() {
}
var fileDescriptor_83c10c24ec417dc9 = []byte{
- // 14685 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x5c, 0xd7,
- 0x75, 0x18, 0xac, 0xd7, 0x3d, 0x5b, 0x9f, 0xd9, 0xef, 0x00, 0xe0, 0x60, 0x48, 0xa0, 0xc1, 0x47,
- 0x12, 0x04, 0x45, 0x72, 0x20, 0x70, 0x91, 0x28, 0x52, 0xa2, 0x35, 0x2b, 0x30, 0x04, 0x66, 0xd0,
- 0xbc, 0x3d, 0x00, 0x24, 0x8a, 0x52, 0xe9, 0x4d, 0xf7, 0x9d, 0x99, 0xa7, 0xe9, 0x7e, 0xaf, 0xf9,
- 0xde, 0xeb, 0x01, 0x06, 0x9f, 0x54, 0x9f, 0x2d, 0xc7, 0x8b, 0x6c, 0x27, 0xa5, 0x4a, 0x39, 0x4b,
- 0xc9, 0x2e, 0x57, 0xca, 0x76, 0x6c, 0x2b, 0xca, 0xa6, 0xc8, 0xb1, 0x1d, 0xcb, 0x5b, 0xb6, 0x8a,
- 0x93, 0x4a, 0x39, 0x8e, 0xab, 0x62, 0xb9, 0xe2, 0xca, 0xc4, 0x82, 0x53, 0xe5, 0x52, 0x55, 0x62,
- 0x3b, 0xcb, 0x8f, 0x64, 0xe2, 0xc4, 0xa9, 0xbb, 0xbe, 0x7b, 0xdf, 0xd2, 0xdd, 0x03, 0x0e, 0x46,
- 0x94, 0x8a, 0xff, 0xba, 0xcf, 0x39, 0xf7, 0xdc, 0xfb, 0xee, 0x7a, 0xee, 0x39, 0xe7, 0x9e, 0x03,
- 0xaf, 0xec, 0xbc, 0x14, 0xce, 0xba, 0xfe, 0xc5, 0x9d, 0xf6, 0x06, 0x09, 0x3c, 0x12, 0x91, 0xf0,
- 0xe2, 0x2e, 0xf1, 0xea, 0x7e, 0x70, 0x51, 0x20, 0x9c, 0x96, 0x7b, 0xb1, 0xe6, 0x07, 0xe4, 0xe2,
- 0xee, 0xa5, 0x8b, 0x5b, 0xc4, 0x23, 0x81, 0x13, 0x91, 0xfa, 0x6c, 0x2b, 0xf0, 0x23, 0x1f, 0x21,
- 0x4e, 0x33, 0xeb, 0xb4, 0xdc, 0x59, 0x4a, 0x33, 0xbb, 0x7b, 0x69, 0xe6, 0xd9, 0x2d, 0x37, 0xda,
- 0x6e, 0x6f, 0xcc, 0xd6, 0xfc, 0xe6, 0xc5, 0x2d, 0x7f, 0xcb, 0xbf, 0xc8, 0x48, 0x37, 0xda, 0x9b,
- 0xec, 0x1f, 0xfb, 0xc3, 0x7e, 0x71, 0x16, 0x33, 0x2f, 0xc4, 0xd5, 0x34, 0x9d, 0xda, 0xb6, 0xeb,
- 0x91, 0x60, 0xef, 0x62, 0x6b, 0x67, 0x8b, 0xd5, 0x1b, 0x90, 0xd0, 0x6f, 0x07, 0x35, 0x92, 0xac,
- 0xb8, 0x63, 0xa9, 0xf0, 0x62, 0x93, 0x44, 0x4e, 0x46, 0x73, 0x67, 0x2e, 0xe6, 0x95, 0x0a, 0xda,
- 0x5e, 0xe4, 0x36, 0xd3, 0xd5, 0xbc, 0xbf, 0x5b, 0x81, 0xb0, 0xb6, 0x4d, 0x9a, 0x4e, 0xaa, 0xdc,
- 0xf3, 0x79, 0xe5, 0xda, 0x91, 0xdb, 0xb8, 0xe8, 0x7a, 0x51, 0x18, 0x05, 0xc9, 0x42, 0xf6, 0xd7,
- 0x2d, 0x38, 0x37, 0x77, 0xab, 0xba, 0xd4, 0x70, 0xc2, 0xc8, 0xad, 0xcd, 0x37, 0xfc, 0xda, 0x4e,
- 0x35, 0xf2, 0x03, 0x72, 0xd3, 0x6f, 0xb4, 0x9b, 0xa4, 0xca, 0x3a, 0x02, 0x3d, 0x03, 0x43, 0xbb,
- 0xec, 0xff, 0xca, 0xe2, 0xb4, 0x75, 0xce, 0xba, 0x50, 0x9a, 0x9f, 0xf8, 0xcd, 0xfd, 0xf2, 0x7b,
- 0xee, 0xed, 0x97, 0x87, 0x6e, 0x0a, 0x38, 0x56, 0x14, 0xe8, 0x3c, 0x0c, 0x6c, 0x86, 0xeb, 0x7b,
- 0x2d, 0x32, 0x5d, 0x60, 0xb4, 0x63, 0x82, 0x76, 0x60, 0xb9, 0x4a, 0xa1, 0x58, 0x60, 0xd1, 0x45,
- 0x28, 0xb5, 0x9c, 0x20, 0x72, 0x23, 0xd7, 0xf7, 0xa6, 0x8b, 0xe7, 0xac, 0x0b, 0xfd, 0xf3, 0x93,
- 0x82, 0xb4, 0x54, 0x91, 0x08, 0x1c, 0xd3, 0xd0, 0x66, 0x04, 0xc4, 0xa9, 0x5f, 0xf7, 0x1a, 0x7b,
- 0xd3, 0x7d, 0xe7, 0xac, 0x0b, 0x43, 0x71, 0x33, 0xb0, 0x80, 0x63, 0x45, 0x61, 0x7f, 0xb1, 0x00,
- 0x43, 0x73, 0x9b, 0x9b, 0xae, 0xe7, 0x46, 0x7b, 0xe8, 0x26, 0x8c, 0x78, 0x7e, 0x9d, 0xc8, 0xff,
- 0xec, 0x2b, 0x86, 0x9f, 0x3b, 0x37, 0x9b, 0x9e, 0x4a, 0xb3, 0x6b, 0x1a, 0xdd, 0xfc, 0xc4, 0xbd,
- 0xfd, 0xf2, 0x88, 0x0e, 0xc1, 0x06, 0x1f, 0x84, 0x61, 0xb8, 0xe5, 0xd7, 0x15, 0xdb, 0x02, 0x63,
- 0x5b, 0xce, 0x62, 0x5b, 0x89, 0xc9, 0xe6, 0xc7, 0xef, 0xed, 0x97, 0x87, 0x35, 0x00, 0xd6, 0x99,
- 0xa0, 0x0d, 0x18, 0xa7, 0x7f, 0xbd, 0xc8, 0x55, 0x7c, 0x8b, 0x8c, 0xef, 0x63, 0x79, 0x7c, 0x35,
- 0xd2, 0xf9, 0xa9, 0x7b, 0xfb, 0xe5, 0xf1, 0x04, 0x10, 0x27, 0x19, 0xda, 0x77, 0x61, 0x6c, 0x2e,
- 0x8a, 0x9c, 0xda, 0x36, 0xa9, 0xf3, 0x11, 0x44, 0x2f, 0x40, 0x9f, 0xe7, 0x34, 0x89, 0x18, 0xdf,
- 0x73, 0xa2, 0x63, 0xfb, 0xd6, 0x9c, 0x26, 0x39, 0xd8, 0x2f, 0x4f, 0xdc, 0xf0, 0xdc, 0xb7, 0xda,
- 0x62, 0x56, 0x50, 0x18, 0x66, 0xd4, 0xe8, 0x39, 0x80, 0x3a, 0xd9, 0x75, 0x6b, 0xa4, 0xe2, 0x44,
- 0xdb, 0x62, 0xbc, 0x91, 0x28, 0x0b, 0x8b, 0x0a, 0x83, 0x35, 0x2a, 0xfb, 0x0e, 0x94, 0xe6, 0x76,
- 0x7d, 0xb7, 0x5e, 0xf1, 0xeb, 0x21, 0xda, 0x81, 0xf1, 0x56, 0x40, 0x36, 0x49, 0xa0, 0x40, 0xd3,
- 0xd6, 0xb9, 0xe2, 0x85, 0xe1, 0xe7, 0x2e, 0x64, 0x7e, 0xac, 0x49, 0xba, 0xe4, 0x45, 0xc1, 0xde,
- 0xfc, 0x43, 0xa2, 0xbe, 0xf1, 0x04, 0x16, 0x27, 0x39, 0xdb, 0xff, 0xac, 0x00, 0x27, 0xe7, 0xee,
- 0xb6, 0x03, 0xb2, 0xe8, 0x86, 0x3b, 0xc9, 0x19, 0x5e, 0x77, 0xc3, 0x9d, 0xb5, 0xb8, 0x07, 0xd4,
- 0xd4, 0x5a, 0x14, 0x70, 0xac, 0x28, 0xd0, 0xb3, 0x30, 0x48, 0x7f, 0xdf, 0xc0, 0x2b, 0xe2, 0x93,
- 0xa7, 0x04, 0xf1, 0xf0, 0xa2, 0x13, 0x39, 0x8b, 0x1c, 0x85, 0x25, 0x0d, 0x5a, 0x85, 0xe1, 0x1a,
- 0x5b, 0x90, 0x5b, 0xab, 0x7e, 0x9d, 0xb0, 0xc1, 0x2c, 0xcd, 0x3f, 0x4d, 0xc9, 0x17, 0x62, 0xf0,
- 0xc1, 0x7e, 0x79, 0x9a, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5, 0xbe, 0xfa,
- 0x18, 0x27, 0xc8, 0x58, 0x5b, 0x17, 0xb4, 0xa5, 0xd2, 0xcf, 0x96, 0xca, 0x48, 0xf6, 0x32, 0x41,
- 0x97, 0xa0, 0x6f, 0xc7, 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd7, 0x19, 0x3a, 0xe6, 0x57, 0x5d, 0xaf,
- 0x7e, 0xb0, 0x5f, 0x9e, 0x34, 0x9a, 0x43, 0x81, 0x98, 0x91, 0xda, 0xff, 0xdd, 0x82, 0x32, 0xc3,
- 0x2d, 0xbb, 0x0d, 0x52, 0x21, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0xa3, 0x43, 0x9f, 0x03, 0x08,
- 0x49, 0x2d, 0x20, 0x91, 0xd6, 0xa5, 0x6a, 0x62, 0x54, 0x15, 0x06, 0x6b, 0x54, 0x74, 0x43, 0x08,
- 0xb7, 0x9d, 0x80, 0xcd, 0x2f, 0xd1, 0xb1, 0x6a, 0x43, 0xa8, 0x4a, 0x04, 0x8e, 0x69, 0x8c, 0x0d,
- 0xa1, 0xd8, 0x6d, 0x43, 0x40, 0x1f, 0x86, 0xf1, 0xb8, 0xb2, 0xb0, 0xe5, 0xd4, 0x64, 0x07, 0xb2,
- 0x25, 0x53, 0x35, 0x51, 0x38, 0x49, 0x6b, 0xff, 0x2d, 0x4b, 0x4c, 0x1e, 0xfa, 0xd5, 0xef, 0xf0,
- 0x6f, 0xb5, 0x7f, 0xc9, 0x82, 0xc1, 0x79, 0xd7, 0xab, 0xbb, 0xde, 0x16, 0xfa, 0x14, 0x0c, 0xd1,
- 0xb3, 0xa9, 0xee, 0x44, 0x8e, 0xd8, 0xf7, 0xde, 0xa7, 0xad, 0x2d, 0x75, 0x54, 0xcc, 0xb6, 0x76,
- 0xb6, 0x28, 0x20, 0x9c, 0xa5, 0xd4, 0x74, 0xb5, 0x5d, 0xdf, 0xf8, 0x34, 0xa9, 0x45, 0xab, 0x24,
- 0x72, 0xe2, 0xcf, 0x89, 0x61, 0x58, 0x71, 0x45, 0x57, 0x61, 0x20, 0x72, 0x82, 0x2d, 0x12, 0x89,
- 0x0d, 0x30, 0x73, 0xa3, 0xe2, 0x25, 0x31, 0x5d, 0x91, 0xc4, 0xab, 0x91, 0xf8, 0x58, 0x58, 0x67,
- 0x45, 0xb1, 0x60, 0x61, 0xff, 0x9f, 0x41, 0x38, 0xbd, 0x50, 0x5d, 0xc9, 0x99, 0x57, 0xe7, 0x61,
- 0xa0, 0x1e, 0xb8, 0xbb, 0x24, 0x10, 0xfd, 0xac, 0xb8, 0x2c, 0x32, 0x28, 0x16, 0x58, 0xf4, 0x12,
- 0x8c, 0xf0, 0x03, 0xe9, 0x8a, 0xe3, 0xd5, 0x1b, 0xb2, 0x8b, 0x4f, 0x08, 0xea, 0x91, 0x9b, 0x1a,
- 0x0e, 0x1b, 0x94, 0x87, 0x9c, 0x54, 0xe7, 0x13, 0x8b, 0x31, 0xef, 0xb0, 0xfb, 0xbc, 0x05, 0x13,
- 0xbc, 0x9a, 0xb9, 0x28, 0x0a, 0xdc, 0x8d, 0x76, 0x44, 0xc2, 0xe9, 0x7e, 0xb6, 0xd3, 0x2d, 0x64,
- 0xf5, 0x56, 0x6e, 0x0f, 0xcc, 0xde, 0x4c, 0x70, 0xe1, 0x9b, 0xe0, 0xb4, 0xa8, 0x77, 0x22, 0x89,
- 0xc6, 0xa9, 0x6a, 0xd1, 0xf7, 0x5a, 0x30, 0x53, 0xf3, 0xbd, 0x28, 0xf0, 0x1b, 0x0d, 0x12, 0x54,
- 0xda, 0x1b, 0x0d, 0x37, 0xdc, 0xe6, 0xf3, 0x14, 0x93, 0x4d, 0xb6, 0x13, 0xe4, 0x8c, 0xa1, 0x22,
- 0x12, 0x63, 0x78, 0xf6, 0xde, 0x7e, 0x79, 0x66, 0x21, 0x97, 0x15, 0xee, 0x50, 0x0d, 0xda, 0x01,
- 0x44, 0x8f, 0xd2, 0x6a, 0xe4, 0x6c, 0x91, 0xb8, 0xf2, 0xc1, 0xde, 0x2b, 0x3f, 0x75, 0x6f, 0xbf,
- 0x8c, 0xd6, 0x52, 0x2c, 0x70, 0x06, 0x5b, 0xf4, 0x16, 0x9c, 0xa0, 0xd0, 0xd4, 0xb7, 0x0e, 0xf5,
- 0x5e, 0xdd, 0xf4, 0xbd, 0xfd, 0xf2, 0x89, 0xb5, 0x0c, 0x26, 0x38, 0x93, 0x35, 0xfa, 0x6e, 0x0b,
- 0x4e, 0xc7, 0x9f, 0xbf, 0x74, 0xa7, 0xe5, 0x78, 0xf5, 0xb8, 0xe2, 0x52, 0xef, 0x15, 0xd3, 0x3d,
- 0xf9, 0xf4, 0x42, 0x1e, 0x27, 0x9c, 0x5f, 0x09, 0xf2, 0x60, 0x8a, 0x36, 0x2d, 0x59, 0x37, 0xf4,
- 0x5e, 0xf7, 0x43, 0xf7, 0xf6, 0xcb, 0x53, 0x6b, 0x69, 0x1e, 0x38, 0x8b, 0xf1, 0xcc, 0x02, 0x9c,
- 0xcc, 0x9c, 0x9d, 0x68, 0x02, 0x8a, 0x3b, 0x84, 0x4b, 0x5d, 0x25, 0x4c, 0x7f, 0xa2, 0x13, 0xd0,
- 0xbf, 0xeb, 0x34, 0xda, 0x62, 0x61, 0x62, 0xfe, 0xe7, 0xe5, 0xc2, 0x4b, 0x96, 0xfd, 0xcf, 0x8b,
- 0x30, 0xbe, 0x50, 0x5d, 0xb9, 0xaf, 0x55, 0xaf, 0x1f, 0x7b, 0x85, 0x8e, 0xc7, 0x5e, 0x7c, 0x88,
- 0x16, 0x73, 0x0f, 0xd1, 0xff, 0x3f, 0x63, 0xc9, 0xf6, 0xb1, 0x25, 0xfb, 0xc1, 0x9c, 0x25, 0x7b,
- 0xc4, 0x0b, 0x75, 0x37, 0x67, 0xd6, 0xf6, 0xb3, 0x01, 0xcc, 0x94, 0x90, 0xae, 0xf9, 0x35, 0xa7,
- 0x91, 0xdc, 0x6a, 0x0f, 0x39, 0x75, 0x8f, 0x66, 0x1c, 0x6b, 0x30, 0xb2, 0xe0, 0xb4, 0x9c, 0x0d,
- 0xb7, 0xe1, 0x46, 0x2e, 0x09, 0xd1, 0x93, 0x50, 0x74, 0xea, 0x75, 0x26, 0xdd, 0x95, 0xe6, 0x4f,
- 0xde, 0xdb, 0x2f, 0x17, 0xe7, 0xea, 0x54, 0xcc, 0x00, 0x45, 0xb5, 0x87, 0x29, 0x05, 0x7a, 0x2f,
- 0xf4, 0xd5, 0x03, 0xbf, 0x35, 0x5d, 0x60, 0x94, 0x74, 0x95, 0xf7, 0x2d, 0x06, 0x7e, 0x2b, 0x41,
- 0xca, 0x68, 0xec, 0xdf, 0x28, 0xc0, 0x23, 0x0b, 0xa4, 0xb5, 0xbd, 0x5c, 0xcd, 0x39, 0x2f, 0x2e,
- 0xc0, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0xab, 0x02, 0x86, 0x15,
- 0x16, 0x9d, 0x83, 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, 0x43, 0x29,
- 0xda, 0x21, 0x09, 0xc4, 0x8c, 0x51, 0x14, 0x37, 0x42, 0x12, 0x60, 0x86, 0x89, 0x25, 0x01, 0x2a,
- 0x23, 0x88, 0x13, 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, 0x62, 0x64,
- 0x7b, 0x5a, 0x9a, 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, 0xae, 0xa2,
- 0xc2, 0xd7, 0x0a, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0x8d, 0x74, 0xc7, 0xf5, 0xbe, 0x24,
- 0x8e, 0xaa, 0xf7, 0xfe, 0x87, 0x05, 0x8f, 0x2c, 0xb8, 0x5e, 0x9d, 0x04, 0x39, 0x13, 0xf0, 0xc1,
- 0xdc, 0x9d, 0x0f, 0x27, 0xa4, 0x18, 0x53, 0xac, 0xef, 0x08, 0xa6, 0x98, 0xfd, 0x27, 0x16, 0x20,
- 0xfe, 0xd9, 0xef, 0xb8, 0x8f, 0xbd, 0x91, 0xfe, 0xd8, 0x23, 0x98, 0x16, 0xf6, 0xdf, 0xb3, 0x60,
- 0x78, 0xa1, 0xe1, 0xb8, 0x4d, 0xf1, 0xa9, 0x0b, 0x30, 0x29, 0x15, 0x45, 0x0c, 0xac, 0xc9, 0xfe,
- 0x74, 0x73, 0x9b, 0xc4, 0x49, 0x24, 0x4e, 0xd3, 0xa3, 0x8f, 0xc3, 0x69, 0x03, 0xb8, 0x4e, 0x9a,
- 0xad, 0x86, 0x13, 0xe9, 0xb7, 0x02, 0x76, 0xfa, 0xe3, 0x3c, 0x22, 0x9c, 0x5f, 0xde, 0xbe, 0x06,
- 0x63, 0x0b, 0x0d, 0x97, 0x78, 0xd1, 0x4a, 0x65, 0xc1, 0xf7, 0x36, 0xdd, 0x2d, 0xf4, 0x32, 0x8c,
- 0x45, 0x6e, 0x93, 0xf8, 0xed, 0xa8, 0x4a, 0x6a, 0xbe, 0xc7, 0xee, 0xda, 0xd6, 0x85, 0xfe, 0x79,
- 0x74, 0x6f, 0xbf, 0x3c, 0xb6, 0x6e, 0x60, 0x70, 0x82, 0xd2, 0xfe, 0x7d, 0x3a, 0xe2, 0x7e, 0xb3,
- 0xe5, 0x7b, 0xc4, 0x8b, 0x16, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0x2f, 0x43, 0x5f, 0x44, 0x47, 0x90,
- 0x7f, 0xf9, 0x79, 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0xec, 0x97, 0x4f, 0xa5, 0x4b, 0xb0, 0x91, 0x65,
- 0x65, 0xd0, 0x07, 0x61, 0x20, 0x8c, 0x9c, 0xa8, 0x1d, 0x8a, 0x4f, 0x7d, 0x54, 0x8e, 0x7f, 0x95,
- 0x41, 0x0f, 0xf6, 0xcb, 0xe3, 0xaa, 0x18, 0x07, 0x61, 0x51, 0x00, 0x3d, 0x05, 0x83, 0x4d, 0x12,
- 0x86, 0xce, 0x96, 0x3c, 0xbf, 0xc7, 0x45, 0xd9, 0xc1, 0x55, 0x0e, 0xc6, 0x12, 0x8f, 0x1e, 0x83,
- 0x7e, 0x12, 0x04, 0x7e, 0x20, 0x76, 0x95, 0x51, 0x41, 0xd8, 0xbf, 0x44, 0x81, 0x98, 0xe3, 0xec,
- 0x7f, 0x63, 0xc1, 0xb8, 0x6a, 0x2b, 0xaf, 0xeb, 0x18, 0xee, 0x4d, 0x6f, 0x00, 0xd4, 0xe4, 0x07,
- 0x86, 0xec, 0xbc, 0x1b, 0x7e, 0xee, 0x7c, 0xa6, 0x68, 0x91, 0xea, 0xc6, 0x98, 0xb3, 0x02, 0x85,
- 0x58, 0xe3, 0x66, 0xff, 0xaa, 0x05, 0x53, 0x89, 0x2f, 0xba, 0xe6, 0x86, 0x11, 0x7a, 0x33, 0xf5,
- 0x55, 0xb3, 0xbd, 0x7d, 0x15, 0x2d, 0xcd, 0xbe, 0x49, 0x2d, 0x3e, 0x09, 0xd1, 0xbe, 0xe8, 0x0a,
- 0xf4, 0xbb, 0x11, 0x69, 0xca, 0x8f, 0x79, 0xac, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0xb2, 0x42,
- 0x4b, 0x62, 0xce, 0xc0, 0xfe, 0x8d, 0x22, 0x94, 0xf8, 0xb4, 0x5d, 0x75, 0x5a, 0xc7, 0x30, 0x16,
- 0x4f, 0x43, 0xc9, 0x6d, 0x36, 0xdb, 0x91, 0xb3, 0x21, 0x0e, 0xa0, 0x21, 0xbe, 0x19, 0xac, 0x48,
- 0x20, 0x8e, 0xf1, 0x68, 0x05, 0xfa, 0x58, 0x53, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xed,
- 0xb3, 0x8b, 0x4e, 0xe4, 0x70, 0xd9, 0x4f, 0x9d, 0x7c, 0x14, 0x84, 0x19, 0x0b, 0xe4, 0x00, 0x6c,
- 0xb8, 0x9e, 0x13, 0xec, 0x51, 0xd8, 0x74, 0x91, 0x31, 0x7c, 0xb6, 0x33, 0xc3, 0x79, 0x45, 0xcf,
- 0xd9, 0xaa, 0x0f, 0x8b, 0x11, 0x58, 0x63, 0x3a, 0xf3, 0x01, 0x28, 0x29, 0xe2, 0xc3, 0x88, 0x70,
- 0x33, 0x1f, 0x86, 0xf1, 0x44, 0x5d, 0xdd, 0x8a, 0x8f, 0xe8, 0x12, 0xe0, 0x2f, 0xb3, 0x2d, 0x43,
- 0xb4, 0x7a, 0xc9, 0xdb, 0x15, 0x3b, 0xe7, 0x5d, 0x38, 0xd1, 0xc8, 0xd8, 0x7b, 0xc5, 0xb8, 0xf6,
- 0xbe, 0x57, 0x3f, 0x22, 0x3e, 0xfb, 0x44, 0x16, 0x16, 0x67, 0xd6, 0x41, 0xa5, 0x1a, 0xbf, 0x45,
- 0x17, 0x88, 0xd3, 0xd0, 0x2f, 0x08, 0xd7, 0x05, 0x0c, 0x2b, 0x2c, 0xdd, 0xef, 0x4e, 0xa8, 0xc6,
- 0x5f, 0x25, 0x7b, 0x55, 0xd2, 0x20, 0xb5, 0xc8, 0x0f, 0xbe, 0xa5, 0xcd, 0x3f, 0xc3, 0x7b, 0x9f,
- 0x6f, 0x97, 0xc3, 0x82, 0x41, 0xf1, 0x2a, 0xd9, 0xe3, 0x43, 0xa1, 0x7f, 0x5d, 0xb1, 0xe3, 0xd7,
- 0x7d, 0xc5, 0x82, 0x51, 0xf5, 0x75, 0xc7, 0xb0, 0x2f, 0xcc, 0x9b, 0xfb, 0xc2, 0x99, 0x8e, 0x13,
- 0x3c, 0x67, 0x47, 0xf8, 0x5a, 0x01, 0x4e, 0x2b, 0x1a, 0x7a, 0x9b, 0xe1, 0x7f, 0xc4, 0xac, 0xba,
- 0x08, 0x25, 0x4f, 0xe9, 0xf5, 0x2c, 0x53, 0xa1, 0x16, 0x6b, 0xf5, 0x62, 0x1a, 0x2a, 0x94, 0x7a,
- 0xf1, 0x31, 0x3b, 0xa2, 0x2b, 0xbc, 0x85, 0x72, 0x7b, 0x1e, 0x8a, 0x6d, 0xb7, 0x2e, 0x0e, 0x98,
- 0xf7, 0xc9, 0xde, 0xbe, 0xb1, 0xb2, 0x78, 0xb0, 0x5f, 0x7e, 0x34, 0xcf, 0xd8, 0x42, 0x4f, 0xb6,
- 0x70, 0xf6, 0xc6, 0xca, 0x22, 0xa6, 0x85, 0xd1, 0x1c, 0x8c, 0xcb, 0x13, 0xfa, 0x26, 0x15, 0x10,
- 0x7d, 0x4f, 0x9c, 0x43, 0x4a, 0x6b, 0x8d, 0x4d, 0x34, 0x4e, 0xd2, 0xa3, 0x45, 0x98, 0xd8, 0x69,
- 0x6f, 0x90, 0x06, 0x89, 0xf8, 0x07, 0x5f, 0x25, 0x5c, 0xa7, 0x5b, 0x8a, 0xef, 0x92, 0x57, 0x13,
- 0x78, 0x9c, 0x2a, 0x61, 0xff, 0x39, 0x3b, 0x0f, 0x44, 0xef, 0x55, 0x02, 0x9f, 0x4e, 0x2c, 0xca,
- 0xfd, 0x5b, 0x39, 0x9d, 0x7b, 0x99, 0x15, 0x57, 0xc9, 0xde, 0xba, 0x4f, 0xef, 0x12, 0xd9, 0xb3,
- 0xc2, 0x98, 0xf3, 0x7d, 0x1d, 0xe7, 0xfc, 0xcf, 0x17, 0xe0, 0xa4, 0xea, 0x01, 0x43, 0x6c, 0xfd,
- 0x76, 0xef, 0x83, 0x4b, 0x30, 0x5c, 0x27, 0x9b, 0x4e, 0xbb, 0x11, 0x29, 0x03, 0x43, 0x3f, 0x37,
- 0x32, 0x2d, 0xc6, 0x60, 0xac, 0xd3, 0x1c, 0xa2, 0xdb, 0xfe, 0xfd, 0x08, 0x3b, 0x88, 0x23, 0x87,
- 0xce, 0x71, 0xb5, 0x6a, 0xac, 0xdc, 0x55, 0xf3, 0x18, 0xf4, 0xbb, 0x4d, 0x2a, 0x98, 0x15, 0x4c,
- 0x79, 0x6b, 0x85, 0x02, 0x31, 0xc7, 0xa1, 0x27, 0x60, 0xb0, 0xe6, 0x37, 0x9b, 0x8e, 0x57, 0x67,
- 0x47, 0x5e, 0x69, 0x7e, 0x98, 0xca, 0x6e, 0x0b, 0x1c, 0x84, 0x25, 0x0e, 0x3d, 0x02, 0x7d, 0x4e,
- 0xb0, 0xc5, 0xb5, 0x2e, 0xa5, 0xf9, 0x21, 0x5a, 0xd3, 0x5c, 0xb0, 0x15, 0x62, 0x06, 0xa5, 0x97,
- 0xc6, 0xdb, 0x7e, 0xb0, 0xe3, 0x7a, 0x5b, 0x8b, 0x6e, 0x20, 0x96, 0x84, 0x3a, 0x0b, 0x6f, 0x29,
- 0x0c, 0xd6, 0xa8, 0xd0, 0x32, 0xf4, 0xb7, 0xfc, 0x20, 0x0a, 0xa7, 0x07, 0x58, 0x77, 0x3f, 0x9a,
- 0xb3, 0x11, 0xf1, 0xaf, 0xad, 0xf8, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1c, 0x5d,
- 0x83, 0x41, 0xe2, 0xed, 0x2e, 0x07, 0x7e, 0x73, 0x7a, 0x2a, 0x9f, 0xd3, 0x12, 0x27, 0xe1, 0xd3,
- 0x2c, 0x96, 0x51, 0x05, 0x18, 0x4b, 0x16, 0xe8, 0x83, 0x50, 0x24, 0xde, 0xee, 0xf4, 0x20, 0xe3,
- 0x34, 0x93, 0xc3, 0xe9, 0xa6, 0x13, 0xc4, 0x7b, 0xfe, 0x92, 0xb7, 0x8b, 0x69, 0x19, 0xf4, 0x31,
- 0x28, 0xc9, 0x0d, 0x23, 0x14, 0xea, 0xcc, 0xcc, 0x09, 0x2b, 0xb7, 0x19, 0x4c, 0xde, 0x6a, 0xbb,
- 0x01, 0x69, 0x12, 0x2f, 0x0a, 0xe3, 0x1d, 0x52, 0x62, 0x43, 0x1c, 0x73, 0x43, 0x35, 0x18, 0x09,
- 0x48, 0xe8, 0xde, 0x25, 0x15, 0xbf, 0xe1, 0xd6, 0xf6, 0xa6, 0x1f, 0x62, 0xcd, 0x7b, 0xaa, 0x63,
- 0x97, 0x61, 0xad, 0x40, 0xac, 0x6e, 0xd7, 0xa1, 0xd8, 0x60, 0x8a, 0x3e, 0x26, 0x15, 0xf5, 0xab,
- 0x7e, 0xdb, 0x8b, 0xc2, 0xe9, 0x12, 0xab, 0x24, 0xd3, 0x84, 0x7a, 0x33, 0xa6, 0x4b, 0x6a, 0xf2,
- 0x79, 0x61, 0x6c, 0xb0, 0x42, 0x9f, 0x80, 0x51, 0xfe, 0x9f, 0x1b, 0x22, 0xc3, 0xe9, 0x93, 0x8c,
- 0xf7, 0xb9, 0x7c, 0xde, 0x9c, 0x70, 0xfe, 0xa4, 0x60, 0x3e, 0xaa, 0x43, 0x43, 0x6c, 0x72, 0x43,
- 0x18, 0x46, 0x1b, 0xee, 0x2e, 0xf1, 0x48, 0x18, 0x56, 0x02, 0x7f, 0x83, 0x08, 0xbd, 0xea, 0xe9,
- 0x6c, 0xc3, 0xa5, 0xbf, 0x41, 0xe6, 0x27, 0x29, 0xcf, 0x6b, 0x7a, 0x19, 0x6c, 0xb2, 0x40, 0x37,
- 0x60, 0x8c, 0x5e, 0x64, 0xdd, 0x98, 0xe9, 0x70, 0x37, 0xa6, 0xec, 0xf2, 0x86, 0x8d, 0x42, 0x38,
- 0xc1, 0x04, 0x5d, 0x87, 0x91, 0x30, 0x72, 0x82, 0xa8, 0xdd, 0xe2, 0x4c, 0x4f, 0x75, 0x63, 0xca,
- 0xec, 0xde, 0x55, 0xad, 0x08, 0x36, 0x18, 0xa0, 0xd7, 0xa0, 0xd4, 0x70, 0x37, 0x49, 0x6d, 0xaf,
- 0xd6, 0x20, 0xd3, 0x23, 0x8c, 0x5b, 0xe6, 0xce, 0x75, 0x4d, 0x12, 0x71, 0x61, 0x5a, 0xfd, 0xc5,
- 0x71, 0x71, 0x74, 0x13, 0x4e, 0x45, 0x24, 0x68, 0xba, 0x9e, 0x43, 0x77, 0x1c, 0x71, 0x7f, 0x63,
- 0xf6, 0xe4, 0x51, 0xb6, 0xa4, 0xcf, 0x8a, 0xd1, 0x38, 0xb5, 0x9e, 0x49, 0x85, 0x73, 0x4a, 0xa3,
- 0x3b, 0x30, 0x9d, 0x81, 0xe1, 0x53, 0xf9, 0x04, 0xe3, 0xfc, 0x21, 0xc1, 0x79, 0x7a, 0x3d, 0x87,
- 0xee, 0xa0, 0x03, 0x0e, 0xe7, 0x72, 0x47, 0xd7, 0x61, 0x9c, 0x6d, 0x73, 0x95, 0x76, 0xa3, 0x21,
- 0x2a, 0x1c, 0x63, 0x15, 0x3e, 0x21, 0x0f, 0xfd, 0x15, 0x13, 0x7d, 0xb0, 0x5f, 0x86, 0xf8, 0x1f,
- 0x4e, 0x96, 0x46, 0x1b, 0xcc, 0x74, 0xd9, 0x0e, 0xdc, 0x68, 0x8f, 0xae, 0x34, 0x72, 0x27, 0x9a,
- 0x1e, 0xef, 0xa8, 0xc6, 0xd1, 0x49, 0x95, 0x7d, 0x53, 0x07, 0xe2, 0x24, 0x43, 0xba, 0x6f, 0x87,
- 0x51, 0xdd, 0xf5, 0xa6, 0x27, 0xf8, 0xe5, 0x47, 0x6e, 0x7b, 0x55, 0x0a, 0xc4, 0x1c, 0xc7, 0xcc,
- 0x96, 0xf4, 0xc7, 0x75, 0x7a, 0x3c, 0x4e, 0x32, 0xc2, 0xd8, 0x6c, 0x29, 0x11, 0x38, 0xa6, 0xa1,
- 0x12, 0x6b, 0x14, 0xed, 0x4d, 0x23, 0x46, 0xaa, 0x76, 0xaf, 0xf5, 0xf5, 0x8f, 0x61, 0x0a, 0xb7,
- 0x37, 0x60, 0x4c, 0x6d, 0x1d, 0xac, 0x4f, 0x50, 0x19, 0xfa, 0x99, 0x8c, 0x26, 0x94, 0x8e, 0x25,
- 0xda, 0x04, 0x26, 0xbf, 0x61, 0x0e, 0x67, 0x4d, 0x70, 0xef, 0x92, 0xf9, 0xbd, 0x88, 0x70, 0xc5,
- 0x41, 0x51, 0x6b, 0x82, 0x44, 0xe0, 0x98, 0xc6, 0xfe, 0xbf, 0x5c, 0xd6, 0x8d, 0xb7, 0xf4, 0x1e,
- 0x0e, 0xb1, 0x67, 0x60, 0x68, 0xdb, 0x0f, 0x23, 0x4a, 0xcd, 0xea, 0xe8, 0x8f, 0xa5, 0xdb, 0x2b,
- 0x02, 0x8e, 0x15, 0x05, 0x7a, 0x05, 0x46, 0x6b, 0x7a, 0x05, 0xe2, 0x04, 0x56, 0xdb, 0x88, 0x51,
- 0x3b, 0x36, 0x69, 0xd1, 0x4b, 0x30, 0xc4, 0x5c, 0x71, 0x6a, 0x7e, 0x43, 0x88, 0x86, 0x52, 0x8c,
- 0x18, 0xaa, 0x08, 0xf8, 0x81, 0xf6, 0x1b, 0x2b, 0x6a, 0x74, 0x1e, 0x06, 0x68, 0x13, 0x56, 0x2a,
- 0xe2, 0xec, 0x53, 0xfa, 0xb3, 0x2b, 0x0c, 0x8a, 0x05, 0xd6, 0xfe, 0x55, 0x8b, 0x09, 0x3e, 0xe9,
- 0x0d, 0x1a, 0x5d, 0x61, 0x3b, 0x3c, 0xdb, 0xee, 0x35, 0xfd, 0xd5, 0xe3, 0xda, 0xb6, 0xad, 0x70,
- 0x07, 0x89, 0xff, 0xd8, 0x28, 0x89, 0xde, 0x80, 0xd1, 0x80, 0xb0, 0x2d, 0x42, 0x4c, 0x78, 0x7e,
- 0xfa, 0xbf, 0x20, 0xbb, 0x00, 0xeb, 0xc8, 0x83, 0xfd, 0xf2, 0xc3, 0xf1, 0x79, 0x44, 0xdb, 0x63,
- 0xa0, 0xb1, 0xc9, 0xca, 0xfe, 0xcb, 0x05, 0x6d, 0x96, 0x54, 0x23, 0x27, 0x22, 0xa8, 0x02, 0x83,
- 0xb7, 0x1d, 0x37, 0x72, 0xbd, 0x2d, 0x21, 0xa4, 0x75, 0x3e, 0x95, 0x58, 0xa1, 0x5b, 0xbc, 0x00,
- 0x17, 0x35, 0xc4, 0x1f, 0x2c, 0xd9, 0x50, 0x8e, 0x41, 0xdb, 0xf3, 0x28, 0xc7, 0x42, 0xaf, 0x1c,
- 0x31, 0x2f, 0xc0, 0x39, 0x8a, 0x3f, 0x58, 0xb2, 0x41, 0x6f, 0x02, 0xc8, 0x1d, 0x82, 0xd4, 0x85,
- 0x0b, 0xcf, 0x33, 0xdd, 0x99, 0xae, 0xab, 0x32, 0xf3, 0x63, 0x54, 0x90, 0x89, 0xff, 0x63, 0x8d,
- 0x9f, 0x1d, 0x69, 0x63, 0xaa, 0x37, 0x06, 0x7d, 0x9c, 0x2e, 0x51, 0x27, 0x88, 0x48, 0x7d, 0x2e,
- 0x12, 0x9d, 0xf3, 0xde, 0xde, 0x6e, 0x72, 0xeb, 0x6e, 0x93, 0xe8, 0xcb, 0x59, 0x30, 0xc1, 0x31,
- 0x3f, 0xfb, 0x17, 0x8b, 0x30, 0x9d, 0xd7, 0x5c, 0xba, 0x68, 0xc8, 0x1d, 0x37, 0x5a, 0xa0, 0x32,
- 0xa8, 0x65, 0x2e, 0x9a, 0x25, 0x01, 0xc7, 0x8a, 0x82, 0xce, 0xde, 0xd0, 0xdd, 0x92, 0x17, 0xf1,
- 0xfe, 0x78, 0xf6, 0x56, 0x19, 0x14, 0x0b, 0x2c, 0xa5, 0x0b, 0x88, 0x13, 0x0a, 0x1f, 0x31, 0x6d,
- 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0xab, 0x04, 0xfb, 0xba, 0xa8, 0x04, 0x8d, 0x2e, 0xea, 0x3f,
- 0xda, 0x2e, 0x42, 0x9f, 0x04, 0xd8, 0x74, 0x3d, 0x37, 0xdc, 0x66, 0xdc, 0x07, 0x0e, 0xcd, 0x5d,
- 0x49, 0xb0, 0xcb, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x17, 0x61, 0x58, 0x6d, 0x20, 0x2b, 0x8b, 0xcc,
- 0x60, 0xae, 0x39, 0x20, 0xc5, 0xbb, 0xe9, 0x22, 0xd6, 0xe9, 0xec, 0x4f, 0x27, 0xe7, 0x8b, 0x58,
- 0x01, 0x5a, 0xff, 0x5a, 0xbd, 0xf6, 0x6f, 0xa1, 0x73, 0xff, 0xda, 0xdf, 0x18, 0x80, 0x71, 0xa3,
- 0xb2, 0x76, 0xd8, 0xc3, 0x9e, 0x7b, 0x99, 0x1e, 0x40, 0x4e, 0x44, 0xc4, 0xfa, 0xb3, 0xbb, 0x2f,
- 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x84, 0x52, 0xc3, 0x09, 0x99, 0x7a, 0x91,
- 0x88, 0x75, 0xd7, 0x0b, 0xb3, 0xf8, 0xf6, 0xe6, 0x84, 0x91, 0x76, 0xea, 0x73, 0xde, 0x31, 0x4b,
- 0x7a, 0x52, 0x52, 0xf9, 0x4a, 0x3a, 0x21, 0xaa, 0x46, 0x50, 0x21, 0x6c, 0x0f, 0x73, 0x1c, 0x7a,
- 0x89, 0x6d, 0xad, 0x74, 0x56, 0x2c, 0x50, 0x69, 0x94, 0x4d, 0xb3, 0x7e, 0x43, 0x22, 0x56, 0x38,
- 0x6c, 0x50, 0xc6, 0x17, 0xa8, 0x81, 0x0e, 0x17, 0xa8, 0xa7, 0x60, 0x90, 0xfd, 0x50, 0x33, 0x40,
- 0x8d, 0xc6, 0x0a, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0xa1, 0xde, 0x26, 0x0c, 0xbd, 0xa2, 0x89,
- 0x49, 0xcd, 0x9c, 0x15, 0x86, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, 0x96, 0x38, 0xf4, 0xb3, 0x16, 0x20,
- 0xa7, 0x41, 0xaf, 0xb6, 0x14, 0xac, 0x6e, 0x22, 0xc0, 0x44, 0xed, 0x57, 0xba, 0x76, 0x7b, 0x3b,
- 0x9c, 0x9d, 0x4b, 0x95, 0xe6, 0x6a, 0xcd, 0x97, 0x45, 0x13, 0x51, 0x9a, 0x40, 0x3f, 0x8c, 0xae,
- 0xb9, 0x61, 0xf4, 0xb9, 0xff, 0x98, 0x38, 0x9c, 0x32, 0x9a, 0x84, 0x6e, 0xe8, 0x37, 0xa5, 0xe1,
- 0x43, 0xde, 0x94, 0x46, 0xf3, 0x6e, 0x49, 0x33, 0x6d, 0x78, 0x28, 0xe7, 0x0b, 0x32, 0x94, 0xa5,
- 0x8b, 0xba, 0xb2, 0xb4, 0x8b, 0x8a, 0x6d, 0x56, 0xd6, 0x31, 0xfb, 0x7a, 0xdb, 0xf1, 0x22, 0x37,
- 0xda, 0xd3, 0x95, 0xab, 0xef, 0x85, 0xb1, 0x45, 0x87, 0x34, 0x7d, 0x6f, 0xc9, 0xab, 0xb7, 0x7c,
- 0xd7, 0x8b, 0xd0, 0x34, 0xf4, 0x31, 0xe1, 0x83, 0x6f, 0xbd, 0x7d, 0xb4, 0xf7, 0x30, 0x83, 0xd8,
- 0x5b, 0x70, 0x72, 0xd1, 0xbf, 0xed, 0xdd, 0x76, 0x82, 0xfa, 0x5c, 0x65, 0x45, 0x53, 0xfe, 0xac,
- 0x49, 0xe5, 0x83, 0x95, 0x7f, 0xb5, 0xd3, 0x4a, 0xf2, 0xeb, 0xd0, 0xb2, 0xdb, 0x20, 0x39, 0x2a,
- 0xba, 0xbf, 0x56, 0x30, 0x6a, 0x8a, 0xe9, 0x95, 0x91, 0xd8, 0xca, 0x35, 0x12, 0xbf, 0x0e, 0x43,
- 0x9b, 0x2e, 0x69, 0xd4, 0x31, 0xd9, 0x14, 0xbd, 0xf3, 0x64, 0xbe, 0x1b, 0xd9, 0x32, 0xa5, 0x94,
- 0x2a, 0x59, 0xae, 0xba, 0x58, 0x16, 0x85, 0xb1, 0x62, 0x83, 0x76, 0x60, 0x42, 0xf6, 0xa1, 0xc4,
- 0x8a, 0xfd, 0xe0, 0xa9, 0x4e, 0x03, 0x6f, 0x32, 0x3f, 0x71, 0x6f, 0xbf, 0x3c, 0x81, 0x13, 0x6c,
- 0x70, 0x8a, 0x31, 0x7a, 0x04, 0xfa, 0x9a, 0xf4, 0xe4, 0xeb, 0x63, 0xdd, 0xcf, 0x74, 0x15, 0x4c,
- 0xed, 0xc2, 0xa0, 0xf6, 0x8f, 0x5b, 0xf0, 0x50, 0xaa, 0x67, 0x84, 0xfa, 0xe9, 0x88, 0x47, 0x21,
- 0xa9, 0x0e, 0x2a, 0x74, 0x57, 0x07, 0xd9, 0x7f, 0xdb, 0x82, 0x13, 0x4b, 0xcd, 0x56, 0xb4, 0xb7,
- 0xe8, 0x9a, 0x16, 0xdd, 0x0f, 0xc0, 0x40, 0x93, 0xd4, 0xdd, 0x76, 0x53, 0x8c, 0x5c, 0x59, 0x9e,
- 0x0e, 0xab, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0xad, 0x46, 0x7e, 0xe0, 0x6c, 0x11, 0x0e, 0xc0, 0x82,
- 0x9c, 0x9d, 0xb1, 0xee, 0x5d, 0x72, 0xcd, 0x6d, 0xba, 0xd1, 0xfd, 0xcd, 0x76, 0x61, 0x8c, 0x95,
- 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0xba, 0x05, 0xe3, 0x72, 0xde, 0xcf, 0xd5, 0xeb, 0x01, 0x09, 0x43,
- 0x34, 0x03, 0x05, 0xb7, 0x25, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0x95, 0x0a, 0x2e, 0xb8, 0x2d, 0x29,
- 0xce, 0xb3, 0x03, 0xa8, 0x68, 0xda, 0xa5, 0xaf, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x02, 0x0c, 0x79,
- 0x7e, 0x9d, 0x4b, 0xc4, 0x5c, 0x94, 0x60, 0x13, 0x6c, 0x4d, 0xc0, 0xb0, 0xc2, 0xa2, 0x0a, 0x94,
- 0xb8, 0xd7, 0x62, 0x3c, 0x69, 0x7b, 0xf2, 0x7d, 0x64, 0x5f, 0xb6, 0x2e, 0x4b, 0xe2, 0x98, 0x89,
- 0xfd, 0xeb, 0x16, 0x8c, 0xc8, 0x2f, 0xeb, 0xf1, 0xae, 0x42, 0x97, 0x56, 0x7c, 0x4f, 0x89, 0x97,
- 0x16, 0xbd, 0x6b, 0x30, 0x8c, 0x71, 0xc5, 0x28, 0x1e, 0xea, 0x8a, 0x71, 0x09, 0x86, 0x9d, 0x56,
- 0xab, 0x62, 0xde, 0x4f, 0xd8, 0x54, 0x9a, 0x8b, 0xc1, 0x58, 0xa7, 0xb1, 0x7f, 0xac, 0x00, 0x63,
- 0xf2, 0x0b, 0xaa, 0xed, 0x8d, 0x90, 0x44, 0x68, 0x1d, 0x4a, 0x0e, 0x1f, 0x25, 0x22, 0x27, 0xf9,
- 0x63, 0xd9, 0x4a, 0x2e, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x4e, 0x96, 0xc6, 0x31, 0x23, 0xd4, 0x80,
- 0x49, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0x9d, 0xec, 0x8e, 0x49, 0xee, 0xa7, 0x05, 0xf7, 0xc9,
- 0xb5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x96, 0xa4, 0xe2, 0xb0, 0x98, 0xaf, 0x44, 0xd2, 0x07, 0x2e,
- 0x5b, 0x6f, 0x68, 0xff, 0x8a, 0x05, 0x25, 0x49, 0x76, 0x1c, 0x26, 0xe6, 0x55, 0x18, 0x0c, 0xd9,
- 0x20, 0xc8, 0xae, 0xb1, 0x3b, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, 0xff, 0x21, 0x96, 0x3c,
- 0x98, 0xdd, 0x48, 0x35, 0xff, 0x1d, 0x62, 0x37, 0x52, 0xed, 0xc9, 0x39, 0x94, 0xfe, 0x88, 0xb5,
- 0x59, 0x53, 0xc4, 0x52, 0x91, 0xb7, 0x15, 0x90, 0x4d, 0xf7, 0x4e, 0x52, 0xe4, 0xad, 0x30, 0x28,
- 0x16, 0x58, 0xf4, 0x26, 0x8c, 0xd4, 0xa4, 0xc1, 0x20, 0x5e, 0xe1, 0xe7, 0x3b, 0x1a, 0xaf, 0x94,
- 0x9d, 0x93, 0xeb, 0xd0, 0x16, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x7a, 0xe5, 0x14, 0xbb, 0x79, 0xe5,
- 0xc4, 0x7c, 0xf3, 0x7d, 0x54, 0x7e, 0xc2, 0x82, 0x01, 0xae, 0x28, 0xee, 0x4d, 0x4f, 0xaf, 0x99,
- 0x7d, 0xe3, 0xbe, 0xbb, 0x49, 0x81, 0x42, 0xd2, 0x40, 0xab, 0x50, 0x62, 0x3f, 0x98, 0xa2, 0xbb,
- 0x98, 0xff, 0x68, 0x86, 0xd7, 0xaa, 0x37, 0xf0, 0xa6, 0x2c, 0x86, 0x63, 0x0e, 0xf6, 0x8f, 0x16,
- 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xe0, 0x0e, 0xfd, 0xc2, 0x83, 0x3a, 0xf4, 0xb7,
- 0x60, 0xbc, 0xa6, 0x19, 0x89, 0xe3, 0x91, 0xbc, 0xd0, 0x71, 0x92, 0x68, 0xf6, 0x64, 0xae, 0x9d,
- 0x5b, 0x30, 0x99, 0xe0, 0x24, 0x57, 0xf4, 0x71, 0x18, 0xe1, 0xe3, 0x2c, 0x6a, 0xe1, 0x8e, 0x4d,
- 0x4f, 0xe4, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, 0x99, 0xfd, 0xa7, 0x16,
- 0xa0, 0xa5, 0xd6, 0x36, 0x69, 0x92, 0xc0, 0x69, 0xc4, 0xb6, 0x9e, 0x1f, 0xb2, 0x60, 0x9a, 0xa4,
- 0xc0, 0x0b, 0x7e, 0xb3, 0x29, 0x2e, 0x8b, 0x39, 0xfa, 0x8c, 0xa5, 0x9c, 0x32, 0xea, 0x55, 0xd1,
- 0x74, 0x1e, 0x05, 0xce, 0xad, 0x0f, 0xad, 0xc2, 0x14, 0x3f, 0x25, 0x15, 0x42, 0x73, 0x92, 0x7a,
- 0x58, 0x30, 0x9e, 0x5a, 0x4f, 0x93, 0xe0, 0xac, 0x72, 0xf6, 0x37, 0x47, 0x20, 0xb7, 0x15, 0xef,
- 0x1a, 0xb9, 0xde, 0x35, 0x72, 0xbd, 0x6b, 0xe4, 0x7a, 0xd7, 0xc8, 0xf5, 0xae, 0x91, 0xeb, 0x5d,
- 0x23, 0xd7, 0x51, 0x18, 0xb9, 0xfe, 0x8a, 0x05, 0x27, 0xd5, 0x59, 0x63, 0xdc, 0xae, 0x3f, 0x03,
- 0x53, 0x7c, 0xb9, 0x19, 0xde, 0xbb, 0xe2, 0x6c, 0xbd, 0x94, 0x39, 0x73, 0x13, 0x5e, 0xe6, 0x46,
- 0x41, 0xfe, 0x5c, 0x27, 0x03, 0x81, 0xb3, 0xaa, 0xb1, 0x7f, 0x71, 0x08, 0xfa, 0x97, 0x76, 0x89,
- 0x17, 0x1d, 0xc3, 0x3d, 0xa4, 0x06, 0x63, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d, 0xe3, 0x0f,
- 0x73, 0x5d, 0x3e, 0x25, 0x58, 0x8f, 0xad, 0x18, 0x2c, 0x70, 0x82, 0xe5, 0x83, 0x30, 0x15, 0x5c,
- 0x86, 0x01, 0x7e, 0x52, 0x08, 0x3b, 0x41, 0xe6, 0x9e, 0xcd, 0x3a, 0x51, 0x9c, 0x7f, 0xb1, 0x19,
- 0x83, 0x9f, 0x44, 0xa2, 0x38, 0xfa, 0x34, 0x8c, 0x6d, 0xba, 0x41, 0x18, 0xad, 0xbb, 0x4d, 0x12,
- 0x46, 0x4e, 0xb3, 0x75, 0x1f, 0xa6, 0x01, 0xd5, 0x0f, 0xcb, 0x06, 0x27, 0x9c, 0xe0, 0x8c, 0xb6,
- 0x60, 0xb4, 0xe1, 0xe8, 0x55, 0x0d, 0x1e, 0xba, 0x2a, 0x75, 0x3a, 0x5c, 0xd3, 0x19, 0x61, 0x93,
- 0x2f, 0x5d, 0x4e, 0x35, 0xa6, 0xdd, 0x1e, 0x62, 0xba, 0x07, 0xb5, 0x9c, 0xb8, 0x5a, 0x9b, 0xe3,
- 0xa8, 0x34, 0xc5, 0x5c, 0xc4, 0x4b, 0xa6, 0x34, 0xa5, 0x39, 0x82, 0x7f, 0x0a, 0x4a, 0x84, 0x76,
- 0x21, 0x65, 0x2c, 0x0e, 0x98, 0x8b, 0xbd, 0xb5, 0x75, 0xd5, 0xad, 0x05, 0xbe, 0x69, 0x94, 0x59,
- 0x92, 0x9c, 0x70, 0xcc, 0x14, 0x2d, 0xc0, 0x40, 0x48, 0x02, 0x57, 0x29, 0x7e, 0x3b, 0x0c, 0x23,
- 0x23, 0xe3, 0xef, 0xc1, 0xf8, 0x6f, 0x2c, 0x8a, 0xd2, 0xe9, 0xe5, 0x30, 0xbd, 0x29, 0x3b, 0x0c,
- 0xb4, 0xe9, 0x35, 0xc7, 0xa0, 0x58, 0x60, 0xd1, 0x6b, 0x30, 0x18, 0x90, 0x06, 0xb3, 0xfa, 0x8d,
- 0xf6, 0x3e, 0xc9, 0xb9, 0x11, 0x91, 0x97, 0xc3, 0x92, 0x01, 0xba, 0x0a, 0x28, 0x20, 0x54, 0x1a,
- 0x73, 0xbd, 0x2d, 0xe5, 0x38, 0x2d, 0x36, 0x5a, 0x25, 0xf5, 0xe2, 0x98, 0x42, 0x3e, 0x05, 0xc4,
- 0x19, 0xc5, 0xd0, 0x65, 0x98, 0x54, 0xd0, 0x15, 0x2f, 0x8c, 0x1c, 0xba, 0xc1, 0x8d, 0x33, 0x5e,
- 0x4a, 0x19, 0x82, 0x93, 0x04, 0x38, 0x5d, 0xc6, 0xfe, 0x92, 0x05, 0xbc, 0x9f, 0x8f, 0x41, 0x05,
- 0xf0, 0xaa, 0xa9, 0x02, 0x38, 0x9d, 0x3b, 0x72, 0x39, 0xd7, 0xff, 0x2f, 0x59, 0x30, 0xac, 0x8d,
- 0x6c, 0x3c, 0x67, 0xad, 0x0e, 0x73, 0xb6, 0x0d, 0x13, 0x74, 0xa6, 0x5f, 0xdf, 0x08, 0x49, 0xb0,
- 0x4b, 0xea, 0x6c, 0x62, 0x16, 0xee, 0x6f, 0x62, 0x2a, 0x27, 0xcd, 0x6b, 0x09, 0x86, 0x38, 0x55,
- 0x85, 0xfd, 0x29, 0xd9, 0x54, 0xe5, 0xd3, 0x5a, 0x53, 0x63, 0x9e, 0xf0, 0x69, 0x55, 0xa3, 0x8a,
- 0x63, 0x1a, 0xba, 0xd4, 0xb6, 0xfd, 0x30, 0x4a, 0xfa, 0xb4, 0x5e, 0xf1, 0xc3, 0x08, 0x33, 0x8c,
- 0xfd, 0x3c, 0xc0, 0xd2, 0x1d, 0x52, 0xe3, 0x33, 0x56, 0xbf, 0xa1, 0x58, 0xf9, 0x37, 0x14, 0xfb,
- 0x77, 0x2c, 0x18, 0x5b, 0x5e, 0x30, 0x4e, 0xae, 0x59, 0x00, 0x7e, 0xad, 0xba, 0x75, 0x6b, 0x4d,
- 0xfa, 0x6a, 0x70, 0x73, 0xb5, 0x82, 0x62, 0x8d, 0x02, 0x9d, 0x86, 0x62, 0xa3, 0xed, 0x09, 0x1d,
- 0xe5, 0x20, 0x3d, 0x1e, 0xaf, 0xb5, 0x3d, 0x4c, 0x61, 0xda, 0x33, 0xa0, 0x62, 0xcf, 0xcf, 0x80,
- 0xba, 0x86, 0xff, 0x40, 0x65, 0xe8, 0xbf, 0x7d, 0xdb, 0xad, 0xf3, 0x47, 0xd6, 0xc2, 0x8f, 0xe4,
- 0xd6, 0xad, 0x95, 0xc5, 0x10, 0x73, 0xb8, 0xfd, 0x85, 0x22, 0xcc, 0x2c, 0x37, 0xc8, 0x9d, 0xb7,
- 0xf9, 0xd0, 0xbc, 0xd7, 0x47, 0x4c, 0x87, 0xd3, 0xf6, 0x1c, 0xf6, 0xa1, 0x5a, 0xf7, 0xfe, 0xd8,
- 0x84, 0x41, 0xee, 0xd2, 0x29, 0x9f, 0x9d, 0x67, 0xda, 0xe6, 0xf2, 0x3b, 0x64, 0x96, 0xbb, 0x86,
- 0x0a, 0xdb, 0x9c, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x33, 0x2f, 0xc3, 0x88, 0x4e, 0x79, 0xa8,
- 0x27, 0xa3, 0xdf, 0x53, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0x37, 0xd2, 0x03, 0x71, 0xd4,
- 0xcf, 0x06, 0xbb, 0x8f, 0xc6, 0x9b, 0xc9, 0xd1, 0xb8, 0x94, 0x37, 0x1a, 0xc7, 0x3d, 0x06, 0xdf,
- 0x6b, 0xc1, 0xd4, 0x72, 0xc3, 0xaf, 0xed, 0x24, 0x9e, 0xf6, 0xbd, 0x08, 0xc3, 0x74, 0x3b, 0x0e,
- 0x8d, 0x28, 0x17, 0x46, 0xdc, 0x13, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xc6, 0x8d, 0x95, 0xc5,
- 0xac, 0x70, 0x29, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0xb7, 0x2c, 0x38, 0x73, 0x79, 0x61, 0x29, 0x9e,
- 0x8a, 0xa9, 0x88, 0x2d, 0xe7, 0x61, 0xa0, 0x55, 0xd7, 0x9a, 0x12, 0xeb, 0x70, 0x17, 0x59, 0x2b,
- 0x04, 0xf6, 0x9d, 0x12, 0x8d, 0xe8, 0x06, 0xc0, 0x65, 0x5c, 0x59, 0x10, 0xfb, 0xae, 0x34, 0xd9,
- 0x58, 0xb9, 0x26, 0x9b, 0x27, 0x60, 0x90, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, 0x9b, 0x5b, 0xdf, 0x39,
- 0x08, 0x4b, 0x9c, 0xfd, 0x73, 0x16, 0x4c, 0x5d, 0x76, 0x23, 0x7a, 0x68, 0x27, 0x43, 0x92, 0xd0,
- 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x86, 0x24, 0xc1, 0x0a, 0x83, 0x35, 0x2a, 0xfe, 0x41,
- 0xbb, 0x2e, 0x7b, 0xa3, 0x50, 0x30, 0x8d, 0x64, 0x58, 0xc0, 0xb1, 0xa2, 0xa0, 0xfd, 0x55, 0x77,
- 0x03, 0xa6, 0x5f, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x45, 0x89, 0xc0, 0x31, 0x8d, 0xfd, 0xc7,
- 0x16, 0x94, 0x2f, 0x37, 0xda, 0x61, 0x44, 0x82, 0xcd, 0x30, 0x67, 0xd3, 0x7d, 0x1e, 0x4a, 0x44,
- 0x6a, 0xf3, 0xe5, 0x63, 0x4a, 0x29, 0x88, 0x2a, 0x35, 0x3f, 0x8f, 0x8c, 0xa2, 0xe8, 0x7a, 0x78,
- 0x7f, 0x7c, 0xb8, 0x07, 0xa4, 0xcb, 0x80, 0x88, 0x5e, 0x97, 0x1e, 0x2a, 0x86, 0xc5, 0x9c, 0x58,
- 0x4a, 0x61, 0x71, 0x46, 0x09, 0xfb, 0xc7, 0x2d, 0x38, 0xa9, 0x3e, 0xf8, 0x1d, 0xf7, 0x99, 0xf6,
- 0x57, 0x0b, 0x30, 0x7a, 0x65, 0x7d, 0xbd, 0x72, 0x99, 0x44, 0xda, 0xac, 0xec, 0x6c, 0xa3, 0xc7,
- 0x9a, 0xa9, 0xb1, 0xd3, 0x1d, 0xb1, 0x1d, 0xb9, 0x8d, 0x59, 0x1e, 0x71, 0x6c, 0x76, 0xc5, 0x8b,
- 0xae, 0x07, 0xd5, 0x28, 0x70, 0xbd, 0xad, 0xcc, 0x99, 0x2e, 0x65, 0x96, 0x62, 0x9e, 0xcc, 0x82,
- 0x9e, 0x87, 0x01, 0x16, 0xf2, 0x4c, 0x0e, 0xc2, 0xc3, 0xea, 0x8a, 0xc5, 0xa0, 0x07, 0xfb, 0xe5,
- 0xd2, 0x0d, 0xbc, 0xc2, 0xff, 0x60, 0x41, 0x8a, 0x6e, 0xc0, 0xf0, 0x76, 0x14, 0xb5, 0xae, 0x10,
- 0xa7, 0x4e, 0x02, 0xb9, 0xcb, 0x9e, 0xcd, 0xda, 0x65, 0x69, 0x27, 0x70, 0xb2, 0x78, 0x63, 0x8a,
- 0x61, 0x21, 0xd6, 0xf9, 0xd8, 0x55, 0x80, 0x18, 0x77, 0x44, 0x56, 0x16, 0x7b, 0x1d, 0x4a, 0xf4,
- 0x73, 0xe7, 0x1a, 0xae, 0xd3, 0xd9, 0x8e, 0xfd, 0x34, 0x94, 0xa4, 0x95, 0x3a, 0x14, 0xf1, 0x11,
- 0xd8, 0x89, 0x24, 0x8d, 0xd8, 0x21, 0x8e, 0xf1, 0xf6, 0x26, 0x9c, 0x60, 0xbe, 0xaa, 0x4e, 0xb4,
- 0x6d, 0xcc, 0xbe, 0xee, 0xc3, 0xfc, 0x8c, 0xb8, 0xb1, 0xf1, 0x36, 0x4f, 0x6b, 0x0f, 0x7a, 0x47,
- 0x24, 0xc7, 0xf8, 0xf6, 0x66, 0x7f, 0xb3, 0x0f, 0x1e, 0x5e, 0xa9, 0xe6, 0x87, 0xec, 0x79, 0x09,
- 0x46, 0xb8, 0x20, 0x48, 0x07, 0xdd, 0x69, 0x88, 0x7a, 0x95, 0x6e, 0x73, 0x5d, 0xc3, 0x61, 0x83,
- 0x12, 0x9d, 0x81, 0xa2, 0xfb, 0x96, 0x97, 0x7c, 0xee, 0xb6, 0xf2, 0xfa, 0x1a, 0xa6, 0x70, 0x8a,
- 0xa6, 0x32, 0x25, 0xdf, 0xac, 0x15, 0x5a, 0xc9, 0x95, 0xaf, 0xc2, 0x98, 0x1b, 0xd6, 0x42, 0x77,
- 0xc5, 0xa3, 0x2b, 0x50, 0x5b, 0xc3, 0x4a, 0x9b, 0x40, 0x1b, 0xad, 0xb0, 0x38, 0x41, 0xad, 0x9d,
- 0x1c, 0xfd, 0x3d, 0xcb, 0xa5, 0x5d, 0x03, 0x06, 0xd0, 0x8d, 0xbd, 0xc5, 0xbe, 0x2e, 0x64, 0x9a,
- 0x70, 0xb1, 0xb1, 0xf3, 0x0f, 0x0e, 0xb1, 0xc4, 0xd1, 0xab, 0x5a, 0x6d, 0xdb, 0x69, 0xcd, 0xb5,
- 0xa3, 0xed, 0x45, 0x37, 0xac, 0xf9, 0xbb, 0x24, 0xd8, 0x63, 0xb7, 0xec, 0xa1, 0xf8, 0xaa, 0xa6,
- 0x10, 0x0b, 0x57, 0xe6, 0x2a, 0x94, 0x12, 0xa7, 0xcb, 0xa0, 0x39, 0x18, 0x97, 0xc0, 0x2a, 0x09,
- 0xd9, 0xe6, 0x3e, 0xcc, 0xd8, 0xa8, 0x07, 0x68, 0x02, 0xac, 0x98, 0x24, 0xe9, 0x4d, 0xd1, 0x15,
- 0x8e, 0x42, 0x74, 0xfd, 0x00, 0x8c, 0xba, 0x9e, 0x1b, 0xb9, 0x4e, 0xe4, 0x73, 0x33, 0x0e, 0xbf,
- 0x50, 0x33, 0xd5, 0xf1, 0x8a, 0x8e, 0xc0, 0x26, 0x9d, 0xfd, 0x9f, 0xfa, 0x60, 0x92, 0x0d, 0xdb,
- 0xbb, 0x33, 0xec, 0x3b, 0x69, 0x86, 0xdd, 0x48, 0xcf, 0xb0, 0xa3, 0x90, 0xc9, 0xef, 0x7b, 0x9a,
- 0x7d, 0x1a, 0x4a, 0xea, 0xcd, 0x9d, 0x7c, 0x74, 0x6b, 0xe5, 0x3c, 0xba, 0xed, 0x7e, 0x2e, 0x4b,
- 0xcf, 0xb0, 0x62, 0xa6, 0x67, 0xd8, 0x97, 0x2d, 0x88, 0x4d, 0x06, 0xe8, 0x75, 0x28, 0xb5, 0x7c,
- 0xe6, 0x68, 0x1a, 0x48, 0xef, 0xed, 0xc7, 0x3b, 0xda, 0x1c, 0x78, 0xd4, 0xb2, 0x80, 0xf7, 0x42,
- 0x45, 0x16, 0xc5, 0x31, 0x17, 0x74, 0x15, 0x06, 0x5b, 0x01, 0xa9, 0x46, 0x2c, 0xa4, 0x4e, 0xef,
- 0x0c, 0xf9, 0xac, 0xe1, 0x05, 0xb1, 0xe4, 0x60, 0xff, 0x67, 0x0b, 0x26, 0x92, 0xa4, 0xe8, 0x43,
- 0xd0, 0x47, 0xee, 0x90, 0x9a, 0x68, 0x6f, 0xe6, 0x21, 0x1b, 0x2b, 0x1d, 0x78, 0x07, 0xd0, 0xff,
- 0x98, 0x95, 0x42, 0x57, 0x60, 0x90, 0x9e, 0xb0, 0x97, 0x55, 0xf8, 0xb8, 0x47, 0xf3, 0x4e, 0x69,
- 0x25, 0xaa, 0xf0, 0xc6, 0x09, 0x10, 0x96, 0xc5, 0x99, 0x3b, 0x56, 0xad, 0x55, 0xa5, 0x97, 0x97,
- 0xa8, 0xd3, 0x1d, 0x7b, 0x7d, 0xa1, 0xc2, 0x89, 0x04, 0x37, 0xee, 0x8e, 0x25, 0x81, 0x38, 0x66,
- 0x62, 0xff, 0xbc, 0x05, 0xc0, 0xbd, 0xcf, 0x1c, 0x6f, 0x8b, 0x1c, 0x83, 0x9e, 0x7c, 0x11, 0xfa,
- 0xc2, 0x16, 0xa9, 0x75, 0xf2, 0x81, 0x8e, 0xdb, 0x53, 0x6d, 0x91, 0x5a, 0x3c, 0xe3, 0xe8, 0x3f,
- 0xcc, 0x4a, 0xdb, 0xdf, 0x07, 0x30, 0x16, 0x93, 0xad, 0x44, 0xa4, 0x89, 0x9e, 0x35, 0x02, 0x75,
- 0x9c, 0x4e, 0x04, 0xea, 0x28, 0x31, 0x6a, 0x4d, 0x25, 0xfb, 0x69, 0x28, 0x36, 0x9d, 0x3b, 0x42,
- 0xe7, 0xf6, 0x74, 0xe7, 0x66, 0x50, 0xfe, 0xb3, 0xab, 0xce, 0x1d, 0x7e, 0x2d, 0x7d, 0x5a, 0xae,
- 0x90, 0x55, 0xe7, 0x4e, 0x57, 0x3f, 0x5d, 0x5a, 0x09, 0xab, 0xcb, 0xf5, 0x84, 0x63, 0x55, 0x4f,
- 0x75, 0xb9, 0x5e, 0xb2, 0x2e, 0xd7, 0xeb, 0xa1, 0x2e, 0xd7, 0x43, 0x77, 0x61, 0x50, 0xf8, 0x3d,
- 0x8a, 0x50, 0x5e, 0x17, 0x7b, 0xa8, 0x4f, 0xb8, 0x4d, 0xf2, 0x3a, 0x2f, 0xca, 0x6b, 0xb7, 0x80,
- 0x76, 0xad, 0x57, 0x56, 0x88, 0xfe, 0xaa, 0x05, 0x63, 0xe2, 0x37, 0x26, 0x6f, 0xb5, 0x49, 0x18,
- 0x09, 0xb1, 0xf4, 0xfd, 0xbd, 0xb7, 0x41, 0x14, 0xe4, 0x4d, 0x79, 0xbf, 0x3c, 0x67, 0x4c, 0x64,
- 0xd7, 0x16, 0x25, 0x5a, 0x81, 0xfe, 0xae, 0x05, 0x27, 0x9a, 0xce, 0x1d, 0x5e, 0x23, 0x87, 0x61,
- 0x27, 0x72, 0x7d, 0xe1, 0x3f, 0xf0, 0xa1, 0xde, 0x86, 0x3f, 0x55, 0x9c, 0x37, 0x52, 0xda, 0x1f,
- 0x4f, 0x64, 0x91, 0x74, 0x6d, 0x6a, 0x66, 0xbb, 0x66, 0x36, 0x61, 0x48, 0xce, 0xb7, 0x07, 0xe9,
- 0x64, 0xcd, 0xea, 0x11, 0x73, 0xed, 0x81, 0xd6, 0xf3, 0x69, 0x18, 0xd1, 0xe7, 0xd8, 0x03, 0xad,
- 0xeb, 0x2d, 0x98, 0xca, 0x98, 0x4b, 0x0f, 0xb4, 0xca, 0xdb, 0x70, 0x3a, 0x77, 0x7e, 0x3c, 0x50,
- 0x27, 0xf9, 0xaf, 0x5a, 0xfa, 0x3e, 0x78, 0x0c, 0xc6, 0x8a, 0x05, 0xd3, 0x58, 0x71, 0xb6, 0xf3,
- 0xca, 0xc9, 0xb1, 0x58, 0xbc, 0xa9, 0x37, 0x9a, 0xee, 0xea, 0xe8, 0x35, 0x18, 0x68, 0x50, 0x88,
- 0xf4, 0x9e, 0xb5, 0xbb, 0xaf, 0xc8, 0x58, 0x98, 0x64, 0xf0, 0x10, 0x0b, 0x0e, 0xf6, 0x2f, 0x59,
- 0xd0, 0x77, 0x0c, 0x3d, 0x81, 0xcd, 0x9e, 0x78, 0x36, 0x97, 0xb5, 0x88, 0x6a, 0x3e, 0x8b, 0x9d,
- 0xdb, 0x4b, 0x77, 0x22, 0xe2, 0x85, 0xec, 0x44, 0xce, 0xec, 0x98, 0x9f, 0xb6, 0x60, 0xea, 0x9a,
- 0xef, 0xd4, 0xe7, 0x9d, 0x86, 0xe3, 0xd5, 0x48, 0xb0, 0xe2, 0x6d, 0x1d, 0xca, 0xf5, 0xbb, 0xd0,
- 0xd5, 0xf5, 0x7b, 0x41, 0x7a, 0x4e, 0xf5, 0xe5, 0x8f, 0x1f, 0x95, 0xa4, 0x93, 0xa1, 0x8b, 0x0c,
- 0x1f, 0xdf, 0x6d, 0x40, 0x7a, 0x2b, 0xc5, 0x03, 0x28, 0x0c, 0x83, 0x2e, 0x6f, 0xaf, 0x18, 0xc4,
- 0x27, 0xb3, 0x25, 0xdc, 0xd4, 0xe7, 0x69, 0x4f, 0x7b, 0x38, 0x00, 0x4b, 0x46, 0xf6, 0x4b, 0x90,
- 0x19, 0x6a, 0xa2, 0xbb, 0x5e, 0xc2, 0xfe, 0x18, 0x4c, 0xb2, 0x92, 0x87, 0xd4, 0x0c, 0xd8, 0x09,
- 0x6d, 0x6a, 0x46, 0xd8, 0x4c, 0xfb, 0xf3, 0x16, 0x8c, 0xaf, 0x25, 0xa2, 0x09, 0x9e, 0x67, 0xf6,
- 0xd7, 0x0c, 0x25, 0x7e, 0x95, 0x41, 0xb1, 0xc0, 0x1e, 0xb9, 0x92, 0xeb, 0xcf, 0x2d, 0x88, 0xa3,
- 0xbf, 0x1c, 0x83, 0xf8, 0xb6, 0x60, 0x88, 0x6f, 0x99, 0x82, 0xac, 0x6a, 0x4e, 0x9e, 0xf4, 0x86,
- 0xae, 0xaa, 0xb8, 0x68, 0x1d, 0x64, 0xd8, 0x98, 0x0d, 0x9f, 0x8a, 0x63, 0x66, 0xf0, 0x34, 0x19,
- 0x29, 0xcd, 0xfe, 0xdd, 0x02, 0x20, 0x45, 0xdb, 0x73, 0xdc, 0xb6, 0x74, 0x89, 0xa3, 0x89, 0xdb,
- 0xb6, 0x0b, 0x88, 0x79, 0x10, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0x50, 0xeb, 0x1d, 0xce, 0x3d,
- 0x61, 0x46, 0xbe, 0x0d, 0xbb, 0x96, 0xe2, 0x86, 0x33, 0x6a, 0xd0, 0x3c, 0x43, 0xfa, 0x7b, 0xf5,
- 0x0c, 0x19, 0xe8, 0xf2, 0xc8, 0xf1, 0x2b, 0x16, 0x8c, 0xaa, 0x6e, 0x7a, 0x87, 0xb8, 0xc2, 0xab,
- 0xf6, 0xe4, 0x6c, 0xa0, 0x15, 0xad, 0xc9, 0xec, 0x60, 0xf9, 0x2e, 0xf6, 0x58, 0xd5, 0x69, 0xb8,
- 0x77, 0x89, 0x8a, 0xf3, 0x59, 0x16, 0x8f, 0x4f, 0x05, 0xf4, 0x60, 0xbf, 0x3c, 0xaa, 0xfe, 0xf1,
- 0x38, 0xe6, 0x71, 0x11, 0xba, 0x25, 0x8f, 0x27, 0xa6, 0x22, 0x7a, 0x11, 0xfa, 0x5b, 0xdb, 0x4e,
- 0x48, 0x12, 0x4f, 0x86, 0xfa, 0x2b, 0x14, 0x78, 0xb0, 0x5f, 0x1e, 0x53, 0x05, 0x18, 0x04, 0x73,
- 0xea, 0xde, 0xa3, 0xe1, 0xa5, 0x27, 0x67, 0xd7, 0x68, 0x78, 0x7f, 0x6a, 0x41, 0xdf, 0x9a, 0x5f,
- 0x3f, 0x8e, 0x2d, 0xe0, 0x55, 0x63, 0x0b, 0x78, 0x24, 0x2f, 0xc5, 0x44, 0xee, 0xea, 0x5f, 0x4e,
- 0xac, 0xfe, 0xb3, 0xb9, 0x1c, 0x3a, 0x2f, 0xfc, 0x26, 0x0c, 0xb3, 0xc4, 0x15, 0xe2, 0x79, 0xd4,
- 0xf3, 0xc6, 0x82, 0x2f, 0x27, 0x16, 0xfc, 0xb8, 0x46, 0xaa, 0xad, 0xf4, 0xa7, 0x60, 0x50, 0xbc,
- 0xb7, 0x49, 0xbe, 0xf9, 0x15, 0xb4, 0x58, 0xe2, 0xed, 0x9f, 0x28, 0x82, 0x91, 0x28, 0x03, 0xfd,
- 0x8a, 0x05, 0xb3, 0x01, 0xf7, 0xc3, 0xad, 0x2f, 0xb6, 0x03, 0xd7, 0xdb, 0xaa, 0xd6, 0xb6, 0x49,
- 0xbd, 0xdd, 0x70, 0xbd, 0xad, 0x95, 0x2d, 0xcf, 0x57, 0xe0, 0xa5, 0x3b, 0xa4, 0xd6, 0x66, 0x66,
- 0xb7, 0x2e, 0x59, 0x39, 0x94, 0x3f, 0xfb, 0x73, 0xf7, 0xf6, 0xcb, 0xb3, 0xf8, 0x50, 0xbc, 0xf1,
- 0x21, 0xdb, 0x82, 0x7e, 0xcb, 0x82, 0x8b, 0x3c, 0x7f, 0x44, 0xef, 0xed, 0xef, 0x70, 0x5b, 0xae,
- 0x48, 0x56, 0x31, 0x93, 0x75, 0x12, 0x34, 0xe7, 0x3f, 0x20, 0x3a, 0xf4, 0x62, 0xe5, 0x70, 0x75,
- 0xe1, 0xc3, 0x36, 0xce, 0xfe, 0xc7, 0x45, 0x18, 0x15, 0x51, 0xd3, 0xc4, 0x19, 0xf0, 0xa2, 0x31,
- 0x25, 0x1e, 0x4d, 0x4c, 0x89, 0x49, 0x83, 0xf8, 0x68, 0xb6, 0xff, 0x10, 0x26, 0xe9, 0xe6, 0x7c,
- 0x85, 0x38, 0x41, 0xb4, 0x41, 0x1c, 0xee, 0xf0, 0x55, 0x3c, 0xf4, 0xee, 0xaf, 0xf4, 0x93, 0xd7,
- 0x92, 0xcc, 0x70, 0x9a, 0xff, 0x77, 0xd2, 0x99, 0xe3, 0xc1, 0x44, 0x2a, 0xf0, 0xdd, 0x1b, 0x50,
- 0x52, 0x8f, 0x45, 0xc4, 0xa6, 0xd3, 0x39, 0x7e, 0x64, 0x92, 0x03, 0x57, 0x7f, 0xc5, 0x0f, 0x95,
- 0x62, 0x76, 0xf6, 0xdf, 0x2f, 0x18, 0x15, 0xf2, 0x41, 0x5c, 0x83, 0x21, 0x27, 0x0c, 0xdd, 0x2d,
- 0x8f, 0xd4, 0x3b, 0x69, 0x28, 0x53, 0xd5, 0xb0, 0x07, 0x3b, 0x73, 0xa2, 0x24, 0x56, 0x3c, 0xd0,
- 0x15, 0xee, 0x56, 0xb7, 0x4b, 0x3a, 0xa9, 0x27, 0x53, 0xdc, 0x40, 0x3a, 0xde, 0xed, 0x12, 0x2c,
- 0xca, 0xa3, 0x4f, 0x70, 0xbf, 0xc7, 0xab, 0x9e, 0x7f, 0xdb, 0xbb, 0xec, 0xfb, 0x32, 0xe8, 0x46,
- 0x6f, 0x0c, 0x27, 0xa5, 0xb7, 0xa3, 0x2a, 0x8e, 0x4d, 0x6e, 0xbd, 0x45, 0x92, 0xfd, 0x0c, 0xb0,
- 0x78, 0xf9, 0xe6, 0xdb, 0xec, 0x10, 0x11, 0x18, 0x17, 0x21, 0xf9, 0x24, 0x4c, 0xf4, 0x5d, 0xe6,
- 0x55, 0xce, 0x2c, 0x1d, 0x2b, 0xd2, 0xaf, 0x9a, 0x2c, 0x70, 0x92, 0xa7, 0xfd, 0xb3, 0x16, 0xb0,
- 0x77, 0xaa, 0xc7, 0x20, 0x8f, 0x7c, 0xd8, 0x94, 0x47, 0xa6, 0xf3, 0x3a, 0x39, 0x47, 0x14, 0x79,
- 0x81, 0xcf, 0xac, 0x4a, 0xe0, 0xdf, 0xd9, 0x13, 0xce, 0x2a, 0xdd, 0xef, 0x1f, 0xf6, 0xff, 0xb6,
- 0xf8, 0x26, 0x16, 0xbf, 0xea, 0xff, 0x2c, 0x0c, 0xd5, 0x9c, 0x96, 0x53, 0xe3, 0x59, 0x9d, 0x72,
- 0x35, 0x7a, 0x46, 0xa1, 0xd9, 0x05, 0x51, 0x82, 0x6b, 0xa8, 0x64, 0x68, 0xc7, 0x21, 0x09, 0xee,
- 0xaa, 0x95, 0x52, 0x55, 0xce, 0xec, 0xc0, 0xa8, 0xc1, 0xec, 0x81, 0xaa, 0x33, 0x3e, 0xcb, 0x8f,
- 0x58, 0x15, 0x8a, 0xb4, 0x09, 0x93, 0x9e, 0xf6, 0x9f, 0x1e, 0x28, 0xf2, 0x72, 0xf9, 0x78, 0xb7,
- 0x43, 0x94, 0x9d, 0x3e, 0xda, 0x13, 0xd8, 0x04, 0x1b, 0x9c, 0xe6, 0x6c, 0xff, 0xa4, 0x05, 0x0f,
- 0xe9, 0x84, 0xda, 0x2b, 0x9b, 0x6e, 0x46, 0x92, 0x45, 0x18, 0xf2, 0x5b, 0x24, 0x70, 0x22, 0x3f,
- 0x10, 0xa7, 0xc6, 0x05, 0xd9, 0xe9, 0xd7, 0x05, 0xfc, 0x40, 0xe4, 0x28, 0x90, 0xdc, 0x25, 0x1c,
- 0xab, 0x92, 0xf4, 0xf6, 0xc9, 0x3a, 0x23, 0x14, 0xef, 0xa9, 0xd8, 0x1e, 0xc0, 0x2c, 0xe9, 0x21,
- 0x16, 0x18, 0xfb, 0x9b, 0x16, 0x9f, 0x58, 0x7a, 0xd3, 0xd1, 0x5b, 0x30, 0xd1, 0x74, 0xa2, 0xda,
- 0xf6, 0xd2, 0x9d, 0x56, 0xc0, 0x4d, 0x4e, 0xb2, 0x9f, 0x9e, 0xee, 0xd6, 0x4f, 0xda, 0x47, 0xc6,
- 0xae, 0x9c, 0xab, 0x09, 0x66, 0x38, 0xc5, 0x1e, 0x6d, 0xc0, 0x30, 0x83, 0xb1, 0xa7, 0x82, 0x61,
- 0x27, 0xd1, 0x20, 0xaf, 0x36, 0xe5, 0x8c, 0xb0, 0x1a, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0xcb, 0x45,
- 0xbe, 0xda, 0x99, 0x28, 0xff, 0x14, 0x0c, 0xb6, 0xfc, 0xfa, 0xc2, 0xca, 0x22, 0x16, 0xa3, 0xa0,
- 0x8e, 0x91, 0x0a, 0x07, 0x63, 0x89, 0x47, 0x17, 0x60, 0x48, 0xfc, 0x94, 0x26, 0x42, 0xb6, 0x37,
- 0x0b, 0xba, 0x10, 0x2b, 0x2c, 0x7a, 0x0e, 0xa0, 0x15, 0xf8, 0xbb, 0x6e, 0x9d, 0x85, 0x0e, 0x29,
- 0x9a, 0x7e, 0x44, 0x15, 0x85, 0xc1, 0x1a, 0x15, 0x7a, 0x05, 0x46, 0xdb, 0x5e, 0xc8, 0xc5, 0x11,
- 0x2d, 0x9a, 0xb2, 0xf2, 0x70, 0xb9, 0xa1, 0x23, 0xb1, 0x49, 0x8b, 0xe6, 0x60, 0x20, 0x72, 0x98,
- 0x5f, 0x4c, 0x7f, 0xbe, 0xbb, 0xef, 0x3a, 0xa5, 0xd0, 0x13, 0x08, 0xd1, 0x02, 0x58, 0x14, 0x44,
- 0x6f, 0xc8, 0x57, 0xbb, 0x7c, 0x63, 0x17, 0x7e, 0xf6, 0xbd, 0x1d, 0x02, 0xda, 0x9b, 0x5d, 0xe1,
- 0xbf, 0x6f, 0xf0, 0x42, 0x2f, 0x03, 0x90, 0x3b, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0xbc, 0xd9, 0x94,
- 0x5c, 0xb0, 0xe8, 0xaf, 0xf9, 0xd1, 0x8d, 0x90, 0x2c, 0x29, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x55,
- 0x02, 0x88, 0xe5, 0x76, 0x74, 0x37, 0xb5, 0x71, 0x3d, 0xd3, 0x59, 0xd2, 0x3f, 0xba, 0x5d, 0x0b,
- 0x7d, 0xbf, 0x05, 0xc3, 0x22, 0x42, 0x0a, 0x1b, 0xa1, 0x42, 0xe7, 0x8d, 0xd3, 0x0c, 0xd4, 0x42,
- 0x4b, 0xf0, 0x26, 0x3c, 0x2f, 0x67, 0xa8, 0x86, 0xe9, 0xda, 0x0a, 0xbd, 0x62, 0xf4, 0x3e, 0x79,
- 0x55, 0x2c, 0x1a, 0x5d, 0xa9, 0xae, 0x8a, 0x25, 0x76, 0x46, 0xe8, 0xb7, 0xc4, 0x1b, 0xc6, 0x2d,
- 0xb1, 0x2f, 0xff, 0x59, 0xa2, 0x21, 0xbe, 0x76, 0xbb, 0x20, 0xa2, 0x8a, 0x1e, 0xa2, 0xa0, 0x3f,
- 0xff, 0x79, 0x9e, 0x76, 0x4f, 0xea, 0x12, 0x9e, 0xe0, 0xd3, 0x30, 0x5e, 0x37, 0x85, 0x00, 0x31,
- 0x13, 0x9f, 0xcc, 0xe3, 0x9b, 0x90, 0x19, 0xe2, 0x63, 0x3f, 0x81, 0xc0, 0x49, 0xc6, 0xa8, 0xc2,
- 0x23, 0x56, 0xac, 0x78, 0x9b, 0xbe, 0x78, 0xeb, 0x61, 0xe7, 0x8e, 0xe5, 0x5e, 0x18, 0x91, 0x26,
- 0xa5, 0x8c, 0x4f, 0xf7, 0x35, 0x51, 0x16, 0x2b, 0x2e, 0xe8, 0x35, 0x18, 0x60, 0xef, 0xb3, 0xc2,
- 0xe9, 0xa1, 0x7c, 0x8d, 0xb3, 0x19, 0xba, 0x2f, 0x5e, 0x90, 0xec, 0x6f, 0x88, 0x05, 0x07, 0x74,
- 0x45, 0xbe, 0x7e, 0x0c, 0x57, 0xbc, 0x1b, 0x21, 0x61, 0xaf, 0x1f, 0x4b, 0xf3, 0x8f, 0xc7, 0x0f,
- 0x1b, 0x39, 0x3c, 0x33, 0xcd, 0xa0, 0x51, 0x92, 0x4a, 0x51, 0xe2, 0xbf, 0xcc, 0x5e, 0x28, 0x02,
- 0x0d, 0x65, 0x36, 0xcf, 0xcc, 0x70, 0x18, 0x77, 0xe7, 0x4d, 0x93, 0x05, 0x4e, 0xf2, 0xa4, 0x12,
- 0x29, 0x5f, 0xf5, 0xe2, 0xb5, 0x48, 0xb7, 0xbd, 0x83, 0x5f, 0xc4, 0xd9, 0x69, 0xc4, 0x21, 0x58,
- 0x94, 0x3f, 0x56, 0xf1, 0x60, 0xc6, 0x83, 0x89, 0xe4, 0x12, 0x7d, 0xa0, 0xe2, 0xc8, 0x1f, 0xf6,
- 0xc1, 0x98, 0x39, 0xa5, 0xd0, 0x45, 0x28, 0x09, 0x26, 0x2a, 0x03, 0x88, 0x5a, 0x25, 0xab, 0x12,
- 0x81, 0x63, 0x1a, 0x96, 0xf8, 0x85, 0x15, 0xd7, 0xdc, 0x83, 0xe3, 0xc4, 0x2f, 0x0a, 0x83, 0x35,
- 0x2a, 0x7a, 0xb1, 0xda, 0xf0, 0xfd, 0x48, 0x1d, 0x48, 0x6a, 0xde, 0xcd, 0x33, 0x28, 0x16, 0x58,
- 0x7a, 0x10, 0xed, 0x90, 0xc0, 0x23, 0x0d, 0x33, 0xf2, 0xb6, 0x3a, 0x88, 0xae, 0xea, 0x48, 0x6c,
- 0xd2, 0xd2, 0xe3, 0xd4, 0x0f, 0xd9, 0x44, 0x16, 0xd7, 0xb7, 0xd8, 0xdd, 0xba, 0xca, 0x5f, 0x79,
- 0x4b, 0x3c, 0xfa, 0x18, 0x3c, 0xa4, 0x02, 0x67, 0x61, 0x6e, 0xcd, 0x90, 0x35, 0x0e, 0x18, 0xda,
- 0x96, 0x87, 0x16, 0xb2, 0xc9, 0x70, 0x5e, 0x79, 0xf4, 0x2a, 0x8c, 0x09, 0x11, 0x5f, 0x72, 0x1c,
- 0x34, 0x3d, 0x8c, 0xae, 0x1a, 0x58, 0x9c, 0xa0, 0x96, 0xb1, 0xc3, 0x99, 0x94, 0x2d, 0x39, 0x0c,
- 0xa5, 0x63, 0x87, 0xeb, 0x78, 0x9c, 0x2a, 0x81, 0xe6, 0x60, 0x9c, 0xcb, 0x60, 0xae, 0xb7, 0xc5,
- 0xc7, 0x44, 0x3c, 0xe6, 0x52, 0x4b, 0xea, 0xba, 0x89, 0xc6, 0x49, 0x7a, 0xf4, 0x12, 0x8c, 0x38,
- 0x41, 0x6d, 0xdb, 0x8d, 0x48, 0x2d, 0x6a, 0x07, 0xfc, 0x95, 0x97, 0xe6, 0xa2, 0x35, 0xa7, 0xe1,
- 0xb0, 0x41, 0x69, 0xdf, 0x85, 0xa9, 0x8c, 0xf0, 0x0f, 0x74, 0xe2, 0x38, 0x2d, 0x57, 0x7e, 0x53,
- 0xc2, 0xc3, 0x79, 0xae, 0xb2, 0x22, 0xbf, 0x46, 0xa3, 0xa2, 0xb3, 0x93, 0x85, 0x89, 0xd0, 0x92,
- 0x95, 0xaa, 0xd9, 0xb9, 0x2c, 0x11, 0x38, 0xa6, 0xb1, 0xff, 0x5b, 0x01, 0xc6, 0x33, 0x6c, 0x2b,
- 0x2c, 0x61, 0x66, 0xe2, 0x92, 0x12, 0xe7, 0xc7, 0x34, 0x43, 0xd1, 0x17, 0x0e, 0x11, 0x8a, 0xbe,
- 0xd8, 0x2d, 0x14, 0x7d, 0xdf, 0xdb, 0x09, 0x45, 0x6f, 0xf6, 0x58, 0x7f, 0x4f, 0x3d, 0x96, 0x11,
- 0xbe, 0x7e, 0xe0, 0x90, 0xe1, 0xeb, 0x8d, 0x4e, 0x1f, 0xec, 0xa1, 0xd3, 0x7f, 0xb4, 0x00, 0x13,
- 0x49, 0x57, 0xd2, 0x63, 0xd0, 0xdb, 0xbe, 0x66, 0xe8, 0x6d, 0x2f, 0xf4, 0xf2, 0xf8, 0x36, 0x57,
- 0x87, 0x8b, 0x13, 0x3a, 0xdc, 0xf7, 0xf6, 0xc4, 0xad, 0xb3, 0x3e, 0xf7, 0xa7, 0x0a, 0x70, 0x32,
- 0xf3, 0xf5, 0xef, 0x31, 0xf4, 0xcd, 0x75, 0xa3, 0x6f, 0x9e, 0xed, 0xf9, 0x61, 0x72, 0x6e, 0x07,
- 0xdd, 0x4a, 0x74, 0xd0, 0xc5, 0xde, 0x59, 0x76, 0xee, 0xa5, 0xaf, 0x17, 0xe1, 0x6c, 0x66, 0xb9,
- 0x58, 0xed, 0xb9, 0x6c, 0xa8, 0x3d, 0x9f, 0x4b, 0xa8, 0x3d, 0xed, 0xce, 0xa5, 0x8f, 0x46, 0x0f,
- 0x2a, 0x1e, 0xe8, 0xb2, 0x30, 0x03, 0xf7, 0xa9, 0x03, 0x35, 0x1e, 0xe8, 0x2a, 0x46, 0xd8, 0xe4,
- 0xfb, 0x9d, 0xa4, 0xfb, 0xfc, 0x97, 0x16, 0x9c, 0xce, 0x1c, 0x9b, 0x63, 0xd0, 0x75, 0xad, 0x99,
- 0xba, 0xae, 0xa7, 0x7a, 0x9e, 0xad, 0x39, 0xca, 0xaf, 0x9f, 0xe9, 0xcf, 0xf9, 0x16, 0x76, 0x93,
- 0xbf, 0x0e, 0xc3, 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xd5, 0xaf, 0xab, 0x40, 0xd8, 0xcf, 0xb2, 0x7b,
- 0x56, 0x0c, 0x3e, 0xd8, 0x2f, 0xcf, 0x24, 0x59, 0xc4, 0x68, 0xac, 0x73, 0x40, 0x9f, 0x80, 0xa1,
- 0x50, 0x9c, 0x9b, 0x62, 0xec, 0x9f, 0xef, 0xb1, 0x73, 0x9c, 0x0d, 0xd2, 0x30, 0x23, 0x2e, 0x29,
- 0x4d, 0x85, 0x62, 0x69, 0x46, 0x67, 0x29, 0x1c, 0x69, 0x74, 0x96, 0xe7, 0x00, 0x76, 0xd5, 0x65,
- 0x20, 0xa9, 0x7f, 0xd0, 0xae, 0x09, 0x1a, 0x15, 0xfa, 0x08, 0x4c, 0x84, 0x3c, 0x24, 0xe1, 0x42,
- 0xc3, 0x09, 0xd9, 0x3b, 0x1a, 0x31, 0x0b, 0x59, 0x54, 0xa7, 0x6a, 0x02, 0x87, 0x53, 0xd4, 0x68,
- 0x59, 0xd6, 0xca, 0xe2, 0x27, 0xf2, 0x89, 0x79, 0x3e, 0xae, 0x51, 0xa4, 0xeb, 0x3e, 0x91, 0xec,
- 0x7e, 0xd6, 0xf1, 0x5a, 0x49, 0xf4, 0x09, 0x00, 0x3a, 0x7d, 0x84, 0x1e, 0x62, 0x30, 0x7f, 0xf3,
- 0xa4, 0xbb, 0x4a, 0x3d, 0xd3, 0xb9, 0x99, 0xbd, 0xa9, 0x5d, 0x54, 0x4c, 0xb0, 0xc6, 0x10, 0x39,
- 0x30, 0x1a, 0xff, 0x8b, 0xb3, 0xd9, 0x5e, 0xc8, 0xad, 0x21, 0xc9, 0x9c, 0xa9, 0xbc, 0x17, 0x75,
- 0x16, 0xd8, 0xe4, 0x68, 0xff, 0xf8, 0x20, 0x3c, 0xdc, 0x61, 0x1b, 0x46, 0x73, 0xa6, 0xa9, 0xf7,
- 0xe9, 0xe4, 0xfd, 0x7d, 0x26, 0xb3, 0xb0, 0x71, 0xa1, 0x4f, 0xcc, 0xf6, 0xc2, 0xdb, 0x9e, 0xed,
- 0x3f, 0x6c, 0x69, 0x9a, 0x15, 0xee, 0x54, 0xfa, 0xe1, 0x43, 0x1e, 0x2f, 0x47, 0xa8, 0x6a, 0xd9,
- 0xcc, 0xd0, 0x57, 0x3c, 0xd7, 0x73, 0x73, 0x7a, 0x57, 0x60, 0x7c, 0x35, 0x3b, 0x0e, 0x2f, 0x57,
- 0x65, 0x5c, 0x3e, 0xec, 0xf7, 0x1f, 0x57, 0x4c, 0xde, 0x8f, 0xc9, 0xe8, 0x4b, 0xbc, 0x5e, 0xb1,
- 0xd6, 0x5e, 0x8c, 0xc3, 0x29, 0xa9, 0xb3, 0xf4, 0xd1, 0xcc, 0xe6, 0xea, 0x44, 0xd8, 0x60, 0x75,
- 0xbc, 0x57, 0xef, 0x6f, 0x51, 0x10, 0xe0, 0xdf, 0xb1, 0xe0, 0x4c, 0xc7, 0x88, 0x30, 0xdf, 0x86,
- 0xb2, 0xa1, 0xfd, 0x39, 0x0b, 0xb2, 0x07, 0xdb, 0xf0, 0x28, 0xbb, 0x08, 0xa5, 0x5a, 0x22, 0xef,
- 0x66, 0x1c, 0x1b, 0x41, 0xe5, 0xdc, 0x8c, 0x69, 0x0c, 0xc7, 0xb1, 0x42, 0x57, 0xc7, 0xb1, 0x5f,
- 0xb7, 0x20, 0xb5, 0xbf, 0x1f, 0x83, 0xa0, 0xb1, 0x62, 0x0a, 0x1a, 0x8f, 0xf7, 0xd2, 0x9b, 0x39,
- 0x32, 0xc6, 0x9f, 0x8c, 0xc3, 0xa9, 0x9c, 0x17, 0x79, 0xbb, 0x30, 0xb9, 0x55, 0x23, 0xe6, 0xe3,
- 0xea, 0x4e, 0x41, 0x87, 0x3a, 0xbe, 0xc4, 0xe6, 0xe9, 0x4e, 0x53, 0x24, 0x38, 0x5d, 0x05, 0xfa,
- 0x9c, 0x05, 0x27, 0x9c, 0xdb, 0xe1, 0x12, 0x15, 0x18, 0xdd, 0xda, 0x7c, 0xc3, 0xaf, 0xed, 0xd0,
- 0xd3, 0x58, 0x2e, 0x84, 0x17, 0x32, 0x95, 0x78, 0xb7, 0xaa, 0x29, 0x7a, 0xa3, 0x7a, 0x96, 0xdc,
- 0x3a, 0x8b, 0x0a, 0x67, 0xd6, 0x85, 0xb0, 0x48, 0xed, 0x41, 0xaf, 0xa3, 0x1d, 0x9e, 0xff, 0x67,
- 0x3d, 0x9d, 0xe4, 0x12, 0x90, 0xc4, 0x60, 0xc5, 0x07, 0x7d, 0x0a, 0x4a, 0x5b, 0xf2, 0xa5, 0x6f,
- 0x86, 0x84, 0x15, 0x77, 0x64, 0xe7, 0xf7, 0xcf, 0xdc, 0x12, 0xaf, 0x88, 0x70, 0xcc, 0x14, 0xbd,
- 0x0a, 0x45, 0x6f, 0x33, 0xec, 0x94, 0x1f, 0x3a, 0xe1, 0x72, 0xc9, 0x83, 0x6c, 0xac, 0x2d, 0x57,
- 0x31, 0x2d, 0x88, 0xae, 0x40, 0x31, 0xd8, 0xa8, 0x0b, 0x0d, 0x74, 0xe6, 0x22, 0xc5, 0xf3, 0x8b,
- 0x39, 0xad, 0x62, 0x9c, 0xf0, 0xfc, 0x22, 0xa6, 0x2c, 0x50, 0x05, 0xfa, 0xd9, 0x33, 0x36, 0x21,
- 0xcf, 0x64, 0xde, 0xdc, 0x3a, 0x3c, 0x07, 0xe5, 0x91, 0x38, 0x18, 0x01, 0xe6, 0x8c, 0xd0, 0x3a,
- 0x0c, 0xd4, 0x58, 0x2e, 0x61, 0x21, 0xc0, 0xbc, 0x2f, 0x53, 0xd7, 0xdc, 0x21, 0xc9, 0xb2, 0x50,
- 0xbd, 0x32, 0x0a, 0x2c, 0x78, 0x31, 0xae, 0xa4, 0xb5, 0xbd, 0x19, 0x8a, 0x5c, 0xfb, 0xd9, 0x5c,
- 0x3b, 0xe4, 0x0e, 0x17, 0x5c, 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0xd8, 0xac, 0x89, 0x27,
- 0x6a, 0x99, 0x4a, 0x67, 0x33, 0x4e, 0xca, 0xfc, 0xc0, 0xbd, 0xfd, 0x72, 0x61, 0x79, 0x01, 0x17,
- 0x36, 0x6b, 0x68, 0x0d, 0x06, 0x37, 0x79, 0x64, 0x05, 0xa1, 0x57, 0x7e, 0x32, 0x3b, 0xe8, 0x43,
- 0x2a, 0xf8, 0x02, 0x7f, 0xee, 0x24, 0x10, 0x58, 0x32, 0x61, 0x99, 0x26, 0x54, 0x84, 0x08, 0x11,
- 0xa0, 0x6e, 0xf6, 0x70, 0x51, 0x3d, 0xb8, 0x7c, 0x19, 0xc7, 0x99, 0xc0, 0x1a, 0x47, 0x3a, 0xab,
- 0x9d, 0xbb, 0xed, 0x80, 0x85, 0x1a, 0x17, 0x91, 0x8c, 0x32, 0x67, 0xf5, 0x9c, 0x24, 0xea, 0x34,
- 0xab, 0x15, 0x11, 0x8e, 0x99, 0xa2, 0x1d, 0x18, 0xdd, 0x0d, 0x5b, 0xdb, 0x44, 0x2e, 0x69, 0x16,
- 0xd8, 0x28, 0x47, 0x3e, 0xba, 0x29, 0x08, 0xdd, 0x20, 0x6a, 0x3b, 0x8d, 0xd4, 0x2e, 0xc4, 0x64,
- 0xd9, 0x9b, 0x3a, 0x33, 0x6c, 0xf2, 0xa6, 0xdd, 0xff, 0x56, 0xdb, 0xdf, 0xd8, 0x8b, 0x88, 0x88,
- 0x2b, 0x97, 0xd9, 0xfd, 0xaf, 0x73, 0x92, 0x74, 0xf7, 0x0b, 0x04, 0x96, 0x4c, 0xd0, 0x4d, 0xd1,
- 0x3d, 0x6c, 0xf7, 0x9c, 0xc8, 0x8f, 0x30, 0x3b, 0x27, 0x89, 0x72, 0x3a, 0x85, 0xed, 0x96, 0x31,
- 0x2b, 0xb6, 0x4b, 0xb6, 0xb6, 0xfd, 0xc8, 0xf7, 0x12, 0x3b, 0xf4, 0x64, 0xfe, 0x2e, 0x59, 0xc9,
- 0xa0, 0x4f, 0xef, 0x92, 0x59, 0x54, 0x38, 0xb3, 0x2e, 0x54, 0x87, 0xb1, 0x96, 0x1f, 0x44, 0xb7,
- 0xfd, 0x40, 0xce, 0x2f, 0xd4, 0x41, 0x2f, 0x66, 0x50, 0x8a, 0x1a, 0x59, 0xc8, 0x46, 0x13, 0x83,
- 0x13, 0x3c, 0xd1, 0x47, 0x61, 0x30, 0xac, 0x39, 0x0d, 0xb2, 0x72, 0x7d, 0x7a, 0x2a, 0xff, 0xf8,
- 0xa9, 0x72, 0x92, 0x9c, 0xd9, 0xc5, 0x03, 0x63, 0x70, 0x12, 0x2c, 0xd9, 0xa1, 0x65, 0xe8, 0x67,
- 0xe9, 0x16, 0x59, 0x10, 0xc4, 0x9c, 0x40, 0xb9, 0x29, 0x07, 0x78, 0xbe, 0x37, 0x31, 0x30, 0xe6,
- 0xc5, 0xe9, 0x1a, 0x10, 0xd7, 0x43, 0x3f, 0x9c, 0x3e, 0x99, 0xbf, 0x06, 0xc4, 0xad, 0xf2, 0x7a,
- 0xb5, 0xd3, 0x1a, 0x50, 0x44, 0x38, 0x66, 0x4a, 0x77, 0x66, 0xba, 0x9b, 0x9e, 0xea, 0xe0, 0xb9,
- 0x95, 0xbb, 0x97, 0xb2, 0x9d, 0x99, 0xee, 0xa4, 0x94, 0x85, 0xfd, 0x07, 0x83, 0x69, 0x99, 0x85,
- 0x29, 0x14, 0xfe, 0x82, 0x95, 0xb2, 0x35, 0xbf, 0xbf, 0x57, 0xfd, 0xe6, 0x11, 0x5e, 0x85, 0x3e,
- 0x67, 0xc1, 0xa9, 0x56, 0xe6, 0x87, 0x08, 0x01, 0xa0, 0x37, 0x35, 0x29, 0xff, 0x74, 0x15, 0x30,
- 0x33, 0x1b, 0x8f, 0x73, 0x6a, 0x4a, 0x5e, 0x37, 0x8b, 0x6f, 0xfb, 0xba, 0xb9, 0x0a, 0x43, 0x35,
- 0x7e, 0x15, 0xe9, 0x98, 0x5b, 0x3f, 0x79, 0xf7, 0x66, 0xa2, 0x84, 0xb8, 0xc3, 0x6c, 0x62, 0xc5,
- 0x02, 0xfd, 0x88, 0x05, 0x67, 0x92, 0x4d, 0xc7, 0x84, 0xa1, 0x45, 0x94, 0x4d, 0xae, 0xcb, 0x58,
- 0x16, 0xdf, 0x9f, 0x92, 0xff, 0x0d, 0xe2, 0x83, 0x6e, 0x04, 0xb8, 0x73, 0x65, 0x68, 0x31, 0x43,
- 0x99, 0x32, 0x60, 0x1a, 0x90, 0x7a, 0x50, 0xa8, 0xbc, 0x00, 0x23, 0x4d, 0xbf, 0xed, 0x45, 0xc2,
- 0xd1, 0x4b, 0x38, 0x9d, 0x30, 0x67, 0x8b, 0x55, 0x0d, 0x8e, 0x0d, 0xaa, 0x84, 0x1a, 0x66, 0xe8,
- 0xbe, 0xd5, 0x30, 0x6f, 0xc2, 0x88, 0xa7, 0x79, 0x26, 0x0b, 0x79, 0xe0, 0x7c, 0x7e, 0x84, 0x5c,
- 0xdd, 0x8f, 0x99, 0xb7, 0x52, 0x87, 0x60, 0x83, 0xdb, 0xf1, 0x7a, 0x80, 0x7d, 0xc9, 0xca, 0x10,
- 0xea, 0xb9, 0x2a, 0xe6, 0x43, 0xa6, 0x2a, 0xe6, 0x7c, 0x52, 0x15, 0x93, 0x32, 0x1e, 0x18, 0x5a,
- 0x98, 0xde, 0xb3, 0x3b, 0xf5, 0x1a, 0x65, 0xd3, 0x6e, 0xc0, 0xb9, 0x6e, 0xc7, 0x12, 0xf3, 0xf8,
- 0xab, 0x2b, 0x53, 0x71, 0xec, 0xf1, 0x57, 0x5f, 0x59, 0xc4, 0x0c, 0xd3, 0x6b, 0xfc, 0x26, 0xfb,
- 0xbf, 0x58, 0x50, 0xac, 0xf8, 0xf5, 0x63, 0xb8, 0xf0, 0x7e, 0xd8, 0xb8, 0xf0, 0x3e, 0x9c, 0x7d,
- 0x20, 0xd6, 0x73, 0x4d, 0x1f, 0x4b, 0x09, 0xd3, 0xc7, 0x99, 0x3c, 0x06, 0x9d, 0x0d, 0x1d, 0x3f,
- 0x5d, 0x84, 0xe1, 0x8a, 0x5f, 0x57, 0xee, 0xf6, 0xff, 0xf4, 0x7e, 0xdc, 0xed, 0x73, 0x73, 0x65,
- 0x68, 0x9c, 0x99, 0xa3, 0xa0, 0x7c, 0x69, 0xfc, 0x6d, 0xe6, 0x75, 0x7f, 0x8b, 0xb8, 0x5b, 0xdb,
- 0x11, 0xa9, 0x27, 0x3f, 0xe7, 0xf8, 0xbc, 0xee, 0xff, 0xa0, 0x00, 0xe3, 0x89, 0xda, 0x51, 0x03,
- 0x46, 0x1b, 0xba, 0x62, 0x5d, 0xcc, 0xd3, 0xfb, 0xd2, 0xc9, 0x0b, 0xaf, 0x65, 0x0d, 0x84, 0x4d,
- 0xe6, 0x68, 0x16, 0x40, 0x59, 0x9a, 0xa5, 0x7a, 0x95, 0x49, 0xfd, 0xca, 0x14, 0x1d, 0x62, 0x8d,
- 0x02, 0xbd, 0x08, 0xc3, 0x91, 0xdf, 0xf2, 0x1b, 0xfe, 0xd6, 0xde, 0x55, 0x22, 0x43, 0x7b, 0x29,
- 0x5f, 0xc4, 0xf5, 0x18, 0x85, 0x75, 0x3a, 0x74, 0x07, 0x26, 0x15, 0x93, 0xea, 0x11, 0x18, 0x1b,
- 0x98, 0x56, 0x61, 0x2d, 0xc9, 0x11, 0xa7, 0x2b, 0xb1, 0x7f, 0xae, 0xc8, 0xbb, 0xd8, 0x8b, 0xdc,
- 0x77, 0x57, 0xc3, 0x3b, 0x7b, 0x35, 0x7c, 0xdd, 0x82, 0x09, 0x5a, 0x3b, 0x73, 0xb4, 0x92, 0xc7,
- 0xbc, 0x8a, 0xc9, 0x6d, 0x75, 0x88, 0xc9, 0x7d, 0x9e, 0xee, 0x9a, 0x75, 0xbf, 0x1d, 0x09, 0xdd,
- 0x9d, 0xb6, 0x2d, 0x52, 0x28, 0x16, 0x58, 0x41, 0x47, 0x82, 0x40, 0x3c, 0x0e, 0xd5, 0xe9, 0x48,
- 0x10, 0x60, 0x81, 0x95, 0x21, 0xbb, 0xfb, 0xb2, 0x43, 0x76, 0xf3, 0xc8, 0xab, 0xc2, 0x25, 0x47,
- 0x08, 0x5c, 0x5a, 0xe4, 0x55, 0xe9, 0xab, 0x13, 0xd3, 0xd8, 0x5f, 0x2d, 0xc2, 0x48, 0xc5, 0xaf,
- 0xc7, 0x56, 0xe6, 0x17, 0x0c, 0x2b, 0xf3, 0xb9, 0x84, 0x95, 0x79, 0x42, 0xa7, 0x7d, 0xd7, 0xa6,
- 0xfc, 0xad, 0xb2, 0x29, 0xff, 0x9a, 0xc5, 0x46, 0x6d, 0x71, 0xad, 0xca, 0xfd, 0xf6, 0xd0, 0x25,
- 0x18, 0x66, 0x1b, 0x0c, 0x7b, 0x8d, 0x2c, 0x4d, 0xaf, 0x2c, 0xdf, 0xd5, 0x5a, 0x0c, 0xc6, 0x3a,
- 0x0d, 0xba, 0x00, 0x43, 0x21, 0x71, 0x82, 0xda, 0xb6, 0xda, 0x5d, 0x85, 0x9d, 0x94, 0xc3, 0xb0,
- 0xc2, 0xa2, 0xd7, 0xe3, 0xa0, 0x9f, 0xc5, 0xfc, 0xd7, 0x8d, 0x7a, 0x7b, 0xf8, 0x12, 0xc9, 0x8f,
- 0xf4, 0x69, 0xdf, 0x02, 0x94, 0xa6, 0xef, 0x21, 0x2c, 0x5d, 0xd9, 0x0c, 0x4b, 0x57, 0x4a, 0x85,
- 0xa4, 0xfb, 0x33, 0x0b, 0xc6, 0x2a, 0x7e, 0x9d, 0x2e, 0xdd, 0xef, 0xa4, 0x75, 0xaa, 0x47, 0x3c,
- 0x1e, 0xe8, 0x10, 0xf1, 0xf8, 0x31, 0xe8, 0xaf, 0xf8, 0xf5, 0x95, 0x4a, 0xa7, 0xd0, 0x02, 0xf6,
- 0xdf, 0xb4, 0x60, 0xb0, 0xe2, 0xd7, 0x8f, 0xc1, 0x2c, 0xf0, 0x21, 0xd3, 0x2c, 0xf0, 0x50, 0xce,
- 0xbc, 0xc9, 0xb1, 0x04, 0xfc, 0x8d, 0x3e, 0x18, 0xa5, 0xed, 0xf4, 0xb7, 0xe4, 0x50, 0x1a, 0xdd,
- 0x66, 0xf5, 0xd0, 0x6d, 0x54, 0x0a, 0xf7, 0x1b, 0x0d, 0xff, 0x76, 0x72, 0x58, 0x97, 0x19, 0x14,
- 0x0b, 0x2c, 0x7a, 0x06, 0x86, 0x5a, 0x01, 0xd9, 0x75, 0x7d, 0x21, 0xde, 0x6a, 0x46, 0x96, 0x8a,
- 0x80, 0x63, 0x45, 0x41, 0xaf, 0x85, 0xa1, 0xeb, 0xd1, 0xa3, 0xbc, 0xe6, 0x7b, 0x75, 0xae, 0x39,
- 0x2f, 0x8a, 0xb4, 0x1c, 0x1a, 0x1c, 0x1b, 0x54, 0xe8, 0x16, 0x94, 0xd8, 0x7f, 0xb6, 0xed, 0x1c,
- 0x3e, 0x7b, 0xaf, 0xc8, 0x2a, 0x28, 0x18, 0xe0, 0x98, 0x17, 0x7a, 0x0e, 0x20, 0x92, 0xa1, 0xed,
- 0x43, 0x11, 0x68, 0x4d, 0x5d, 0x05, 0x54, 0xd0, 0xfb, 0x10, 0x6b, 0x54, 0xe8, 0x69, 0x28, 0x45,
- 0x8e, 0xdb, 0xb8, 0xe6, 0x7a, 0x24, 0x64, 0x1a, 0xf1, 0xa2, 0x4c, 0xee, 0x27, 0x80, 0x38, 0xc6,
- 0x53, 0x51, 0x8c, 0x05, 0xe1, 0xe0, 0xb9, 0xcb, 0x87, 0x18, 0x35, 0x13, 0xc5, 0xae, 0x29, 0x28,
- 0xd6, 0x28, 0xd0, 0x36, 0x3c, 0xe2, 0x7a, 0x2c, 0x85, 0x05, 0xa9, 0xee, 0xb8, 0xad, 0xf5, 0x6b,
- 0xd5, 0x9b, 0x24, 0x70, 0x37, 0xf7, 0xe6, 0x9d, 0xda, 0x0e, 0xf1, 0x64, 0x5e, 0x56, 0x99, 0xae,
- 0xfb, 0x91, 0x95, 0x0e, 0xb4, 0xb8, 0x23, 0x27, 0xfb, 0x79, 0x36, 0xdf, 0xaf, 0x57, 0xd1, 0x7b,
- 0x8d, 0xad, 0xe3, 0x94, 0xbe, 0x75, 0x1c, 0xec, 0x97, 0x07, 0xae, 0x57, 0xb5, 0x18, 0x12, 0x2f,
- 0xc1, 0xc9, 0x8a, 0x5f, 0xaf, 0xf8, 0x41, 0xb4, 0xec, 0x07, 0xb7, 0x9d, 0xa0, 0x2e, 0xa7, 0x57,
- 0x59, 0x46, 0xd1, 0xa0, 0xfb, 0x67, 0x3f, 0xdf, 0x5d, 0x8c, 0x08, 0x19, 0xcf, 0x33, 0x89, 0xed,
- 0x90, 0x6f, 0xbf, 0x6a, 0x4c, 0x76, 0x50, 0x49, 0x60, 0x2e, 0x3b, 0x11, 0x41, 0xd7, 0x59, 0xe6,
- 0xf5, 0xf8, 0x18, 0x15, 0xc5, 0x9f, 0xd2, 0x32, 0xaf, 0xc7, 0xc8, 0xcc, 0x73, 0xd7, 0x2c, 0x6f,
- 0x7f, 0x56, 0x54, 0xc2, 0xef, 0xe0, 0xdc, 0xbf, 0xae, 0x97, 0xd4, 0xc5, 0x32, 0x4b, 0x44, 0x21,
- 0x3f, 0xbd, 0x00, 0xb7, 0x7a, 0x76, 0xcc, 0x12, 0x61, 0xbf, 0x08, 0x93, 0xf4, 0xea, 0xa7, 0xe4,
- 0x28, 0xf6, 0x91, 0xdd, 0xa3, 0x79, 0xfc, 0xd7, 0x7e, 0x76, 0x0e, 0x24, 0xd2, 0x9f, 0xa0, 0x4f,
- 0xc2, 0x58, 0x48, 0xae, 0xb9, 0x5e, 0xfb, 0x8e, 0x54, 0xbc, 0x74, 0x78, 0x73, 0x58, 0x5d, 0xd2,
- 0x29, 0xb9, 0xfa, 0xd6, 0x84, 0xe1, 0x04, 0x37, 0xd4, 0x84, 0xb1, 0xdb, 0xae, 0x57, 0xf7, 0x6f,
- 0x87, 0x92, 0xff, 0x50, 0xbe, 0x16, 0xf7, 0x16, 0xa7, 0x4c, 0xb4, 0xd1, 0xa8, 0xee, 0x96, 0xc1,
- 0x0c, 0x27, 0x98, 0xd3, 0xb5, 0x16, 0xb4, 0xbd, 0xb9, 0xf0, 0x46, 0x48, 0x02, 0x91, 0xf9, 0x9f,
- 0xa7, 0xe5, 0x95, 0x40, 0x1c, 0xe3, 0xe9, 0x5a, 0x63, 0x7f, 0x2e, 0x07, 0x7e, 0x9b, 0xe7, 0xda,
- 0x10, 0x6b, 0x0d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x5e, 0xc4, 0xfe, 0xad, 0xf9, 0x1e, 0xf6, 0xfd,
- 0x48, 0xee, 0x5e, 0xcc, 0x13, 0x41, 0x83, 0x63, 0x83, 0x0a, 0x2d, 0x03, 0x0a, 0xdb, 0xad, 0x56,
- 0x83, 0x39, 0x33, 0x39, 0x0d, 0xc6, 0x8a, 0x7b, 0x79, 0x14, 0x79, 0xac, 0xe0, 0x6a, 0x0a, 0x8b,
- 0x33, 0x4a, 0xd0, 0x63, 0x69, 0x53, 0x34, 0xb5, 0x9f, 0x35, 0x95, 0x5b, 0x7c, 0xaa, 0xbc, 0x9d,
- 0x12, 0x87, 0x96, 0x60, 0x30, 0xdc, 0x0b, 0x6b, 0x91, 0x08, 0xed, 0x98, 0x93, 0x46, 0xab, 0xca,
- 0x48, 0xb4, 0x2c, 0x8e, 0xbc, 0x08, 0x96, 0x65, 0x51, 0x0d, 0xa6, 0x04, 0xc7, 0x85, 0x6d, 0xc7,
- 0x53, 0xf9, 0x82, 0xb8, 0x4f, 0xf7, 0xa5, 0x7b, 0xfb, 0xe5, 0x29, 0x51, 0xb3, 0x8e, 0x3e, 0xd8,
- 0x2f, 0x9f, 0xaa, 0xf8, 0xf5, 0x0c, 0x0c, 0xce, 0xe2, 0xc6, 0x27, 0x5f, 0xad, 0xe6, 0x37, 0x5b,
- 0x95, 0xc0, 0xdf, 0x74, 0x1b, 0xa4, 0x93, 0xd5, 0xac, 0x6a, 0x50, 0x8a, 0xc9, 0x67, 0xc0, 0x70,
- 0x82, 0x9b, 0xfd, 0x59, 0x26, 0xba, 0xb1, 0x64, 0xf1, 0x51, 0x3b, 0x20, 0xa8, 0x09, 0xa3, 0x2d,
- 0xb6, 0xb8, 0x45, 0x06, 0x0c, 0x31, 0xd7, 0x5f, 0xe8, 0x51, 0xfb, 0x73, 0x9b, 0xe5, 0xf5, 0x32,
- 0x3c, 0xa3, 0x2a, 0x3a, 0x3b, 0x6c, 0x72, 0xb7, 0xff, 0xf5, 0x69, 0x76, 0xf8, 0x57, 0xb9, 0x4a,
- 0x67, 0x50, 0x3c, 0x21, 0x11, 0xb7, 0xc8, 0x99, 0x7c, 0xdd, 0x62, 0x3c, 0x2c, 0xe2, 0x19, 0x0a,
- 0x96, 0x65, 0xd1, 0x27, 0x60, 0x8c, 0x5e, 0xca, 0xd4, 0x01, 0x1c, 0x4e, 0x9f, 0xc8, 0x0f, 0xf5,
- 0xa1, 0xa8, 0xf4, 0xec, 0x38, 0x7a, 0x61, 0x9c, 0x60, 0x86, 0x5e, 0x67, 0x9e, 0x48, 0x92, 0x75,
- 0xa1, 0x17, 0xd6, 0xba, 0xd3, 0x91, 0x64, 0xab, 0x31, 0x41, 0x6d, 0x98, 0x4a, 0x27, 0xec, 0x0b,
- 0xa7, 0xed, 0x7c, 0xe9, 0x36, 0x9d, 0x73, 0x2f, 0x4e, 0x63, 0x92, 0xc6, 0x85, 0x38, 0x8b, 0x3f,
- 0xba, 0x06, 0xa3, 0x22, 0x63, 0xba, 0x98, 0xb9, 0x45, 0x43, 0xe5, 0x39, 0x8a, 0x75, 0xe4, 0x41,
- 0x12, 0x80, 0xcd, 0xc2, 0x68, 0x0b, 0xce, 0x68, 0x49, 0xae, 0x2e, 0x07, 0x0e, 0xf3, 0x5b, 0x70,
- 0xd9, 0x76, 0xaa, 0x89, 0x25, 0x8f, 0xde, 0xdb, 0x2f, 0x9f, 0x59, 0xef, 0x44, 0x88, 0x3b, 0xf3,
- 0x41, 0xd7, 0xe1, 0x24, 0x7f, 0xa8, 0xbe, 0x48, 0x9c, 0x7a, 0xc3, 0xf5, 0x94, 0xdc, 0xc3, 0x97,
- 0xfc, 0xe9, 0x7b, 0xfb, 0xe5, 0x93, 0x73, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0xfa, 0x10, 0x94, 0xea,
- 0x5e, 0x28, 0xfa, 0x60, 0xc0, 0xc8, 0x23, 0x56, 0x5a, 0x5c, 0xab, 0xaa, 0xef, 0x8f, 0xff, 0xe0,
- 0xb8, 0x00, 0xda, 0xe2, 0x6a, 0x71, 0xa5, 0xac, 0x19, 0x4c, 0x05, 0xea, 0x4a, 0xea, 0x33, 0x8d,
- 0xa7, 0xaa, 0xdc, 0x1e, 0xa4, 0x5e, 0x70, 0x18, 0xaf, 0x58, 0x0d, 0xc6, 0xe8, 0x35, 0x40, 0x22,
- 0x5e, 0xfd, 0x5c, 0x8d, 0xa5, 0x57, 0x61, 0x56, 0x84, 0x21, 0xf3, 0xf1, 0x64, 0x35, 0x45, 0x81,
- 0x33, 0x4a, 0xa1, 0x2b, 0x74, 0x57, 0xd1, 0xa1, 0x62, 0xd7, 0x52, 0xa9, 0x25, 0x17, 0x49, 0x2b,
- 0x20, 0xcc, 0x0f, 0xcb, 0xe4, 0x88, 0x13, 0xe5, 0x50, 0x1d, 0x1e, 0x71, 0xda, 0x91, 0xcf, 0x2c,
- 0x0e, 0x26, 0xe9, 0xba, 0xbf, 0x43, 0x3c, 0x66, 0xec, 0x1b, 0x9a, 0x3f, 0x47, 0x05, 0xab, 0xb9,
- 0x0e, 0x74, 0xb8, 0x23, 0x17, 0x2a, 0x10, 0xab, 0x5c, 0xd2, 0x60, 0x86, 0x1f, 0xcb, 0xc8, 0x27,
- 0xfd, 0x22, 0x0c, 0x6f, 0xfb, 0x61, 0xb4, 0x46, 0xa2, 0xdb, 0x7e, 0xb0, 0x23, 0xc2, 0xe8, 0xc6,
- 0x41, 0xc9, 0x63, 0x14, 0xd6, 0xe9, 0xe8, 0x8d, 0x97, 0xb9, 0xa2, 0xac, 0x2c, 0x32, 0x2f, 0x80,
- 0xa1, 0x78, 0x8f, 0xb9, 0xc2, 0xc1, 0x58, 0xe2, 0x25, 0xe9, 0x4a, 0x65, 0x81, 0x59, 0xf4, 0x13,
- 0xa4, 0x2b, 0x95, 0x05, 0x2c, 0xf1, 0x74, 0xba, 0x86, 0xdb, 0x4e, 0x40, 0x2a, 0x81, 0x5f, 0x23,
- 0xa1, 0x16, 0x0a, 0xff, 0x61, 0x1e, 0x24, 0x98, 0x4e, 0xd7, 0x6a, 0x16, 0x01, 0xce, 0x2e, 0x87,
- 0x48, 0x3a, 0xc1, 0xdb, 0x58, 0xbe, 0x29, 0x26, 0x2d, 0xcf, 0xf4, 0x98, 0xe3, 0xcd, 0x83, 0x09,
- 0x95, 0x5a, 0x8e, 0x87, 0x05, 0x0e, 0xa7, 0xc7, 0xd9, 0xdc, 0xee, 0x3d, 0xa6, 0xb0, 0x32, 0x6e,
- 0xad, 0x24, 0x38, 0xe1, 0x14, 0x6f, 0x23, 0xc2, 0xdc, 0x44, 0xd7, 0x08, 0x73, 0x17, 0xa1, 0x14,
- 0xb6, 0x37, 0xea, 0x7e, 0xd3, 0x71, 0x3d, 0x66, 0xd1, 0xd7, 0xae, 0x5e, 0x55, 0x89, 0xc0, 0x31,
- 0x0d, 0x5a, 0x86, 0x21, 0x47, 0x5a, 0xae, 0x50, 0x7e, 0x4c, 0x21, 0x65, 0xaf, 0xe2, 0x61, 0x36,
- 0xa4, 0xad, 0x4a, 0x95, 0x45, 0xaf, 0xc0, 0xa8, 0x78, 0x68, 0x2d, 0x52, 0xa7, 0x4e, 0x99, 0xaf,
- 0xe1, 0xaa, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x06, 0x0c, 0x47, 0x7e, 0x83, 0x3d, 0xe9, 0xa2, 0x62,
- 0xde, 0xa9, 0xfc, 0xe8, 0x78, 0xeb, 0x8a, 0x4c, 0x57, 0x1a, 0xab, 0xa2, 0x58, 0xe7, 0x83, 0xd6,
- 0xf9, 0x7c, 0x67, 0x81, 0xef, 0x49, 0x28, 0x72, 0x6f, 0x9e, 0xc9, 0x73, 0xc7, 0x62, 0x64, 0xe6,
- 0x72, 0x10, 0x25, 0xb1, 0xce, 0x06, 0x5d, 0x86, 0xc9, 0x56, 0xe0, 0xfa, 0x6c, 0x4e, 0x28, 0xa3,
- 0xe5, 0xb4, 0x99, 0xe6, 0xaa, 0x92, 0x24, 0xc0, 0xe9, 0x32, 0xec, 0x9d, 0xbc, 0x00, 0x4e, 0x9f,
- 0xe6, 0xa9, 0x3a, 0xf8, 0x4d, 0x96, 0xc3, 0xb0, 0xc2, 0xa2, 0x55, 0xb6, 0x13, 0x73, 0x25, 0xcc,
- 0xf4, 0x4c, 0x7e, 0x18, 0x23, 0x5d, 0x59, 0xc3, 0x85, 0x57, 0xf5, 0x17, 0xc7, 0x1c, 0x50, 0x5d,
- 0xcb, 0x90, 0x49, 0xaf, 0x00, 0xe1, 0xf4, 0x23, 0x1d, 0xfc, 0x01, 0x13, 0x97, 0xa2, 0x58, 0x20,
- 0x30, 0xc0, 0x21, 0x4e, 0xf0, 0x44, 0x1f, 0x81, 0x09, 0x11, 0x7c, 0x31, 0xee, 0xa6, 0x33, 0xb1,
- 0xa3, 0x3c, 0x4e, 0xe0, 0x70, 0x8a, 0x9a, 0xa7, 0xca, 0x70, 0x36, 0x1a, 0x44, 0x6c, 0x7d, 0xd7,
- 0x5c, 0x6f, 0x27, 0x9c, 0x3e, 0xcb, 0xf6, 0x07, 0x91, 0x2a, 0x23, 0x89, 0xc5, 0x19, 0x25, 0xd0,
- 0x3a, 0x4c, 0xb4, 0x02, 0x42, 0x9a, 0x4c, 0xd0, 0x17, 0xe7, 0x59, 0x99, 0x87, 0x89, 0xa0, 0x2d,
- 0xa9, 0x24, 0x70, 0x07, 0x19, 0x30, 0x9c, 0xe2, 0x80, 0x6e, 0xc3, 0x90, 0xbf, 0x4b, 0x82, 0x6d,
- 0xe2, 0xd4, 0xa7, 0xcf, 0x75, 0x78, 0xb8, 0x21, 0x0e, 0xb7, 0xeb, 0x82, 0x36, 0xe1, 0xe8, 0x20,
- 0xc1, 0xdd, 0x1d, 0x1d, 0x64, 0x65, 0xe8, 0x2f, 0x5a, 0x70, 0x5a, 0xda, 0x46, 0xaa, 0x2d, 0xda,
- 0xeb, 0x0b, 0xbe, 0x17, 0x46, 0x01, 0x0f, 0x6c, 0xf0, 0x68, 0xfe, 0x63, 0xff, 0xf5, 0x9c, 0x42,
- 0x4a, 0x0f, 0x7c, 0x3a, 0x8f, 0x22, 0xc4, 0xf9, 0x35, 0xa2, 0x05, 0x98, 0x0c, 0x49, 0x24, 0x37,
- 0xa3, 0xb9, 0x70, 0xf9, 0xf5, 0xc5, 0xb5, 0xe9, 0xc7, 0x78, 0x54, 0x06, 0xba, 0x18, 0xaa, 0x49,
- 0x24, 0x4e, 0xd3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xe9, 0xc7, 0x3b, 0x24, 0x55, 0xf5, 0xeb, 0xd7,
- 0xab, 0xdc, 0xe1, 0xed, 0x7a, 0x15, 0x17, 0xfc, 0x50, 0xa6, 0xab, 0xa0, 0xf7, 0xb1, 0x70, 0xfa,
- 0x09, 0xae, 0x35, 0x94, 0xe9, 0x2a, 0x18, 0x10, 0xc7, 0x78, 0xb4, 0x0d, 0xe3, 0xa1, 0x71, 0xef,
- 0x0d, 0xa7, 0xcf, 0xb3, 0x9e, 0x7a, 0x22, 0x6f, 0xd0, 0x0c, 0x6a, 0x2d, 0xda, 0xbc, 0xc9, 0x05,
- 0x27, 0xd9, 0xf2, 0xd5, 0xa5, 0x5d, 0xf0, 0xc3, 0xe9, 0x27, 0xbb, 0xac, 0x2e, 0x8d, 0x58, 0x5f,
- 0x5d, 0x3a, 0x0f, 0x9c, 0xe0, 0x39, 0xf3, 0x5d, 0x30, 0x99, 0x12, 0x97, 0x0e, 0x93, 0x89, 0x69,
- 0x66, 0x07, 0x46, 0x8d, 0x29, 0xf9, 0x40, 0x1d, 0x0b, 0xbe, 0x67, 0x08, 0x4a, 0xca, 0xe8, 0x8c,
- 0x2e, 0x9a, 0xbe, 0x04, 0xa7, 0x93, 0xbe, 0x04, 0x43, 0x15, 0xbf, 0x6e, 0xb8, 0x0f, 0xac, 0x67,
- 0xc4, 0xee, 0xcb, 0xdb, 0x00, 0x7b, 0x7f, 0xd3, 0xa0, 0x69, 0xf2, 0x8b, 0x3d, 0x3b, 0x25, 0xf4,
- 0x75, 0x34, 0x0e, 0x5c, 0x86, 0x49, 0xcf, 0x67, 0x32, 0x3a, 0xa9, 0x4b, 0x01, 0x8c, 0xc9, 0x59,
- 0x25, 0x3d, 0x18, 0x4e, 0x82, 0x00, 0xa7, 0xcb, 0xd0, 0x0a, 0xb9, 0xa0, 0x94, 0xb4, 0x46, 0x70,
- 0x39, 0x0a, 0x0b, 0x2c, 0x7a, 0x0c, 0xfa, 0x5b, 0x7e, 0x7d, 0xa5, 0x22, 0xe4, 0x73, 0x2d, 0x62,
- 0x6c, 0x7d, 0xa5, 0x82, 0x39, 0x0e, 0xcd, 0xc1, 0x00, 0xfb, 0x11, 0x4e, 0x8f, 0xe4, 0x47, 0x3d,
- 0x61, 0x25, 0xb4, 0x3c, 0x57, 0xac, 0x00, 0x16, 0x05, 0x99, 0x56, 0x94, 0x5e, 0x6a, 0x98, 0x56,
- 0x74, 0xf0, 0x3e, 0xb5, 0xa2, 0x92, 0x01, 0x8e, 0x79, 0xa1, 0x3b, 0x70, 0xd2, 0xb8, 0x48, 0xf2,
- 0x29, 0x42, 0x42, 0x11, 0x79, 0xe1, 0xb1, 0x8e, 0x37, 0x48, 0xe1, 0xc4, 0x70, 0x46, 0x34, 0xfa,
- 0xe4, 0x4a, 0x16, 0x27, 0x9c, 0x5d, 0x01, 0x6a, 0xc0, 0x64, 0x2d, 0x55, 0xeb, 0x50, 0xef, 0xb5,
- 0xaa, 0x01, 0x4d, 0xd7, 0x98, 0x66, 0x8c, 0x5e, 0x81, 0xa1, 0xb7, 0xfc, 0x90, 0x9d, 0x6d, 0xe2,
- 0x4e, 0x21, 0x9f, 0xed, 0x0f, 0xbd, 0x7e, 0xbd, 0xca, 0xe0, 0x07, 0xfb, 0xe5, 0xe1, 0x8a, 0x5f,
- 0x97, 0x7f, 0xb1, 0x2a, 0x80, 0x7e, 0xc0, 0x82, 0x99, 0xf4, 0x4d, 0x55, 0x35, 0x7a, 0xb4, 0xf7,
- 0x46, 0xdb, 0xa2, 0xd2, 0x99, 0xa5, 0x5c, 0x76, 0xb8, 0x43, 0x55, 0xe8, 0x83, 0x74, 0x21, 0x84,
- 0xee, 0x5d, 0x22, 0x92, 0x84, 0x3e, 0x1a, 0x2f, 0x04, 0x0a, 0x3d, 0xd8, 0x2f, 0x8f, 0xf3, 0x2d,
- 0x2d, 0x7e, 0x37, 0x23, 0x0a, 0xd8, 0xbf, 0x6c, 0x31, 0xb5, 0xac, 0x80, 0x92, 0xb0, 0xdd, 0x38,
- 0x8e, 0xcc, 0xc0, 0x4b, 0x86, 0xc9, 0xf3, 0xbe, 0xfd, 0x61, 0xfe, 0x89, 0xc5, 0xfc, 0x61, 0x8e,
- 0xf1, 0xe1, 0xcb, 0xeb, 0x30, 0x14, 0xc9, 0x8c, 0xcd, 0x1d, 0x92, 0x19, 0x6b, 0x8d, 0x62, 0x3e,
- 0x41, 0xea, 0x72, 0xa0, 0x92, 0x33, 0x2b, 0x36, 0xf6, 0x3f, 0xe4, 0x23, 0x20, 0x31, 0xc7, 0x60,
- 0x59, 0x5a, 0x34, 0x2d, 0x4b, 0xe5, 0x2e, 0x5f, 0x90, 0x63, 0x61, 0xfa, 0x07, 0x66, 0xbb, 0x99,
- 0x52, 0xec, 0x9d, 0xee, 0x88, 0x65, 0x7f, 0xde, 0x02, 0x88, 0x63, 0x79, 0xf7, 0x90, 0x93, 0xef,
- 0x25, 0x7a, 0x1d, 0xf0, 0x23, 0xbf, 0xe6, 0x37, 0x84, 0xdd, 0xf4, 0x91, 0xd8, 0xb8, 0xc5, 0xe1,
- 0x07, 0xda, 0x6f, 0xac, 0xa8, 0x51, 0x59, 0x46, 0x0e, 0x2c, 0xc6, 0xe6, 0x56, 0x23, 0x6a, 0xe0,
- 0x17, 0x2d, 0x38, 0x91, 0xe5, 0x45, 0x4d, 0x2f, 0x97, 0x5c, 0x3d, 0xa8, 0x9c, 0xe4, 0xd4, 0x68,
- 0xde, 0x14, 0x70, 0xac, 0x28, 0x7a, 0x4e, 0x76, 0x78, 0xb8, 0x20, 0xda, 0xd7, 0x61, 0xb4, 0x12,
- 0x10, 0xed, 0x5c, 0x7e, 0x95, 0x47, 0xa3, 0xe0, 0xed, 0x79, 0xe6, 0xd0, 0x91, 0x28, 0xec, 0x2f,
- 0x17, 0xe0, 0x04, 0xf7, 0x35, 0x99, 0xdb, 0xf5, 0xdd, 0x7a, 0xc5, 0xaf, 0x8b, 0xb7, 0x72, 0x6f,
- 0xc0, 0x48, 0x4b, 0xd3, 0xe9, 0x76, 0x0a, 0x08, 0xab, 0xeb, 0x7e, 0x63, 0x2d, 0x94, 0x0e, 0xc5,
- 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xeb, 0xd6, 0x94, 0xc3, 0x42, 0xe1, 0xd0, 0x67, 0xa4, 0xaa,
- 0x65, 0x49, 0xe3, 0x83, 0x0d, 0xae, 0x0f, 0x20, 0x05, 0xb9, 0xfd, 0x63, 0x16, 0x3c, 0x94, 0x13,
- 0x3e, 0x96, 0x56, 0x77, 0x9b, 0x79, 0xf5, 0x88, 0x69, 0xab, 0xaa, 0xe3, 0xbe, 0x3e, 0x58, 0x60,
- 0xd1, 0x47, 0x01, 0xb8, 0xaf, 0x0e, 0xf1, 0x6a, 0x5d, 0xe3, 0x6c, 0x1a, 0x21, 0x02, 0xb5, 0x68,
- 0x6f, 0xb2, 0x3c, 0xd6, 0x78, 0xd9, 0x5f, 0xec, 0x83, 0x7e, 0xe6, 0x1b, 0x82, 0x2a, 0x30, 0xb8,
- 0xcd, 0x13, 0x02, 0x75, 0x1c, 0x37, 0x4a, 0x2b, 0x73, 0x0c, 0xc5, 0xe3, 0xa6, 0x41, 0xb1, 0x64,
- 0x83, 0x56, 0x61, 0x8a, 0xe7, 0x65, 0x6a, 0x2c, 0x92, 0x86, 0xb3, 0x27, 0xd5, 0xa5, 0x3c, 0x89,
- 0xb0, 0x52, 0x1b, 0xaf, 0xa4, 0x49, 0x70, 0x56, 0x39, 0xf4, 0x2a, 0x8c, 0xd1, 0xeb, 0xab, 0xdf,
- 0x8e, 0x24, 0x27, 0x9e, 0x91, 0x49, 0x49, 0xf4, 0xeb, 0x06, 0x16, 0x27, 0xa8, 0xd1, 0x2b, 0x30,
- 0xda, 0x4a, 0x29, 0x86, 0xfb, 0x63, 0x0d, 0x8a, 0xa9, 0x0c, 0x36, 0x69, 0x99, 0x23, 0x75, 0x9b,
- 0xb9, 0x8d, 0xaf, 0x6f, 0x07, 0x24, 0xdc, 0xf6, 0x1b, 0x75, 0x26, 0x39, 0xf6, 0x6b, 0x8e, 0xd4,
- 0x09, 0x3c, 0x4e, 0x95, 0xa0, 0x5c, 0x36, 0x1d, 0xb7, 0xd1, 0x0e, 0x48, 0xcc, 0x65, 0xc0, 0xe4,
- 0xb2, 0x9c, 0xc0, 0xe3, 0x54, 0x89, 0xee, 0x1a, 0xef, 0xc1, 0xa3, 0xd1, 0x78, 0xdb, 0x3f, 0x53,
- 0x00, 0x63, 0x68, 0xbf, 0x73, 0x33, 0x45, 0xd1, 0x2f, 0xdb, 0x0a, 0x5a, 0x35, 0xe1, 0x07, 0x95,
- 0xf9, 0x65, 0x71, 0x02, 0x58, 0xfe, 0x65, 0xf4, 0x3f, 0x66, 0xa5, 0xe8, 0x1a, 0x3f, 0x59, 0x09,
- 0x7c, 0x7a, 0xc8, 0xc9, 0x78, 0x65, 0xea, 0xbd, 0xc2, 0xa0, 0x7c, 0xcb, 0xdd, 0x21, 0xb2, 0xa7,
- 0xf0, 0xe8, 0xe6, 0x1c, 0x0c, 0x97, 0xa1, 0xaa, 0x08, 0xaa, 0x20, 0xb9, 0xa0, 0x4b, 0x30, 0x2c,
- 0xd2, 0xff, 0x30, 0xb7, 0x7a, 0xbe, 0x98, 0x98, 0x8b, 0xd3, 0x62, 0x0c, 0xc6, 0x3a, 0x8d, 0xfd,
- 0x83, 0x05, 0x98, 0xca, 0x78, 0x17, 0xc5, 0x8f, 0x91, 0x2d, 0x37, 0x8c, 0x54, 0x8e, 0x59, 0xed,
- 0x18, 0xe1, 0x70, 0xac, 0x28, 0xe8, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0xde, 0x1d, 0x08,
- 0xec, 0x21, 0xb3, 0xb5, 0x9e, 0x83, 0xbe, 0x76, 0x48, 0x64, 0x4c, 0x5e, 0x75, 0x6c, 0x33, 0x73,
- 0x30, 0xc3, 0xd0, 0x1b, 0xd8, 0x96, 0xb2, 0xac, 0x6a, 0x37, 0x30, 0x6e, 0x5b, 0xe5, 0x38, 0xda,
- 0xb8, 0x88, 0x78, 0x8e, 0x17, 0x89, 0x7b, 0x5a, 0x1c, 0x5c, 0x92, 0x41, 0xb1, 0xc0, 0xda, 0x5f,
- 0x28, 0xc2, 0xe9, 0xdc, 0x97, 0x92, 0xb4, 0xe9, 0x4d, 0xdf, 0x73, 0x23, 0x5f, 0xf9, 0x8e, 0xf1,
- 0x80, 0x92, 0xa4, 0xb5, 0xbd, 0x2a, 0xe0, 0x58, 0x51, 0xa0, 0xf3, 0xd0, 0xcf, 0x94, 0xc9, 0xa9,
- 0x6c, 0xbb, 0xf3, 0x8b, 0x3c, 0xc2, 0x18, 0x47, 0xf7, 0x9c, 0x20, 0xfd, 0x31, 0x2a, 0xc1, 0xf8,
- 0x8d, 0xe4, 0x81, 0x42, 0x9b, 0xeb, 0xfb, 0x0d, 0xcc, 0x90, 0xe8, 0x09, 0xd1, 0x5f, 0x09, 0x67,
- 0x29, 0xec, 0xd4, 0xfd, 0x50, 0xeb, 0xb4, 0xa7, 0x60, 0x70, 0x87, 0xec, 0x05, 0xae, 0xb7, 0x95,
- 0x74, 0xa2, 0xbb, 0xca, 0xc1, 0x58, 0xe2, 0xcd, 0xf4, 0x90, 0x83, 0x47, 0x9d, 0xd9, 0x7c, 0xa8,
- 0xab, 0x78, 0xf2, 0xc3, 0x45, 0x18, 0xc7, 0xf3, 0x8b, 0xef, 0x0e, 0xc4, 0x8d, 0xf4, 0x40, 0x1c,
- 0x75, 0x66, 0xf3, 0xee, 0xa3, 0xf1, 0x0b, 0x16, 0x8c, 0xb3, 0x24, 0x44, 0x22, 0x1e, 0x82, 0xeb,
- 0x7b, 0xc7, 0x70, 0x15, 0x78, 0x0c, 0xfa, 0x03, 0x5a, 0x69, 0x32, 0xcd, 0x2e, 0x6b, 0x09, 0xe6,
- 0x38, 0xf4, 0x08, 0xf4, 0xb1, 0x26, 0xd0, 0xc1, 0x1b, 0xe1, 0x5b, 0xf0, 0xa2, 0x13, 0x39, 0x98,
- 0x41, 0x59, 0x7c, 0x2d, 0x4c, 0x5a, 0x0d, 0x97, 0x37, 0x3a, 0x36, 0xf5, 0xbf, 0x33, 0x62, 0x28,
- 0x64, 0x36, 0xed, 0xed, 0xc5, 0xd7, 0xca, 0x66, 0xd9, 0xf9, 0x9a, 0xfd, 0xc7, 0x05, 0x38, 0x9b,
- 0x59, 0xae, 0xe7, 0xf8, 0x5a, 0x9d, 0x4b, 0x3f, 0xc8, 0x34, 0x33, 0xc5, 0x63, 0x74, 0x51, 0xee,
- 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0xec, 0x55, 0x66, 0x97, 0xbd, 0x43, 0xc2, 0x5e, 0x65, 0xb6,
- 0x2d, 0x47, 0x4d, 0xf0, 0xe7, 0x85, 0x9c, 0x6f, 0x61, 0x0a, 0x83, 0x0b, 0x74, 0x9f, 0x61, 0xc8,
- 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, 0x61, 0xd1, 0x1c, 0x8c, 0x37, 0x5d, 0x8f, 0x6e,
- 0x3e, 0x7b, 0xa6, 0x28, 0xae, 0x6c, 0x00, 0xab, 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, 0x85, 0xc4,
- 0xe2, 0x5f, 0xf7, 0xca, 0xa1, 0x56, 0xdd, 0xac, 0xe9, 0x06, 0xa1, 0x7a, 0x31, 0x23, 0x3c, 0xd6,
- 0xaa, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, 0xa3, 0xf7,
- 0x6d, 0x53, 0xb0, 0xbf, 0x5e, 0x84, 0x87, 0x3b, 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, 0xa0, 0xed,
- 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0x89, 0xcd, 0x76, 0xa3, 0xb1, 0xc7, 0x5e, 0xee, 0x90, 0xba, 0xa4,
- 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x9c, 0x58, 0xce, 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, 0x8b, 0x9e,
- 0x24, 0x7b, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, 0x24, 0x36, 0x69, 0xd1, 0x65, 0x98, 0x74, 0x76,
- 0x1d, 0x97, 0xc7, 0x15, 0x97, 0x0c, 0xf8, 0x1d, 0x4b, 0xa9, 0x82, 0xe7, 0x92, 0x04, 0x38, 0x5d,
- 0x06, 0xbd, 0x06, 0xc8, 0xdf, 0x60, 0xfe, 0xfd, 0xf5, 0xcb, 0xc4, 0x13, 0xd6, 0x6a, 0x36, 0x76,
- 0xc5, 0x78, 0x4b, 0xb8, 0x9e, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0xf1, 0x9f, 0x06, 0xf2, 0xe3, 0x3f,
- 0x75, 0xde, 0x17, 0xbb, 0x66, 0x38, 0xba, 0x04, 0xa3, 0x87, 0xf4, 0x5a, 0xb5, 0xff, 0x83, 0x45,
- 0x4f, 0x3c, 0x5e, 0xc6, 0x0c, 0xae, 0xfa, 0x0a, 0x73, 0xab, 0xe5, 0x9a, 0x65, 0x2d, 0xc0, 0xce,
- 0x49, 0xcd, 0xad, 0x36, 0x46, 0x62, 0x93, 0x96, 0xcf, 0x21, 0xcd, 0x1d, 0xd6, 0xb8, 0x15, 0x88,
- 0x08, 0x70, 0x8a, 0x02, 0x7d, 0x0c, 0x06, 0xeb, 0xee, 0xae, 0x1b, 0x0a, 0xe5, 0xd8, 0xa1, 0x8d,
- 0x58, 0xf1, 0xd6, 0xb9, 0xc8, 0xd9, 0x60, 0xc9, 0xcf, 0xfe, 0xe1, 0x42, 0xdc, 0x27, 0xaf, 0xb7,
- 0xfd, 0xc8, 0x39, 0x86, 0x93, 0xfc, 0xb2, 0x71, 0x92, 0x3f, 0xd1, 0x29, 0x0c, 0x1e, 0x6b, 0x52,
- 0xee, 0x09, 0x7e, 0x3d, 0x71, 0x82, 0x3f, 0xd9, 0x9d, 0x55, 0xe7, 0x93, 0xfb, 0x1f, 0x59, 0x30,
- 0x69, 0xd0, 0x1f, 0xc3, 0x01, 0xb2, 0x6c, 0x1e, 0x20, 0x8f, 0x76, 0xfd, 0x86, 0x9c, 0x83, 0xe3,
- 0xfb, 0x8a, 0x89, 0xb6, 0xb3, 0x03, 0xe3, 0x2d, 0xe8, 0xdb, 0x76, 0x82, 0x7a, 0xa7, 0xb4, 0x1f,
- 0xa9, 0x42, 0xb3, 0x57, 0x9c, 0x40, 0x58, 0xf8, 0x9f, 0x91, 0xbd, 0x4e, 0x41, 0x5d, 0xad, 0xfb,
- 0xac, 0x2a, 0xf4, 0x12, 0x0c, 0x84, 0x35, 0xbf, 0xa5, 0x9e, 0xfa, 0x9c, 0x63, 0x1d, 0xcd, 0x20,
- 0x07, 0xfb, 0x65, 0x64, 0x56, 0x47, 0xc1, 0x58, 0xd0, 0xa3, 0x37, 0x60, 0x94, 0xfd, 0x52, 0xee,
- 0x76, 0xc5, 0x7c, 0x0d, 0x46, 0x55, 0x27, 0xe4, 0xbe, 0xa8, 0x06, 0x08, 0x9b, 0xac, 0x66, 0xb6,
- 0xa0, 0xa4, 0x3e, 0xeb, 0x81, 0x5a, 0x89, 0xff, 0x6d, 0x11, 0xa6, 0x32, 0xe6, 0x1c, 0x0a, 0x8d,
- 0x91, 0xb8, 0xd4, 0xe3, 0x54, 0x7d, 0x9b, 0x63, 0x11, 0xb2, 0x0b, 0x54, 0x5d, 0xcc, 0xad, 0x9e,
- 0x2b, 0xbd, 0x11, 0x92, 0x64, 0xa5, 0x14, 0xd4, 0xbd, 0x52, 0x5a, 0xd9, 0xb1, 0x75, 0x35, 0xad,
- 0x48, 0xb5, 0xf4, 0x81, 0x8e, 0xe9, 0xaf, 0xf5, 0xc1, 0x89, 0xac, 0xc8, 0x9c, 0xe8, 0x33, 0x89,
- 0xa4, 0xb3, 0x2f, 0xf4, 0x1a, 0xd3, 0x93, 0x67, 0xa2, 0x15, 0x11, 0x03, 0x67, 0xcd, 0x34, 0xb4,
- 0x5d, 0xbb, 0x59, 0xd4, 0xc9, 0x62, 0x96, 0x04, 0x3c, 0x59, 0xb0, 0xdc, 0x3e, 0xde, 0xdf, 0x73,
- 0x03, 0x44, 0x96, 0xe1, 0x30, 0xe1, 0xca, 0x23, 0xc1, 0xdd, 0x5d, 0x79, 0x64, 0xcd, 0x68, 0x05,
- 0x06, 0x6a, 0xdc, 0x47, 0xa4, 0xd8, 0x7d, 0x0b, 0xe3, 0x0e, 0x22, 0x6a, 0x03, 0x16, 0x8e, 0x21,
- 0x82, 0xc1, 0x8c, 0x0b, 0xc3, 0x5a, 0xc7, 0x3c, 0xd0, 0xc9, 0xb3, 0x43, 0x0f, 0x3e, 0xad, 0x0b,
- 0x1e, 0xe8, 0x04, 0xfa, 0x31, 0x0b, 0x12, 0x0f, 0x45, 0x94, 0x52, 0xce, 0xca, 0x55, 0xca, 0x9d,
- 0x83, 0xbe, 0xc0, 0x6f, 0x90, 0x64, 0xa2, 0x57, 0xec, 0x37, 0x08, 0x66, 0x18, 0x4a, 0x11, 0xc5,
- 0xaa, 0x96, 0x11, 0xfd, 0x1a, 0x29, 0x2e, 0x88, 0x8f, 0x41, 0x7f, 0x83, 0xec, 0x92, 0x46, 0x32,
- 0x1f, 0xd7, 0x35, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x42, 0x1f, 0x9c, 0xe9, 0x18, 0x40, 0x88, 0x5e,
- 0xc6, 0xb6, 0x9c, 0x88, 0xdc, 0x76, 0xf6, 0x92, 0x89, 0x73, 0x2e, 0x73, 0x30, 0x96, 0x78, 0xf6,
- 0x6a, 0x91, 0xc7, 0xbf, 0x4f, 0xa8, 0x30, 0x45, 0xd8, 0x7b, 0x81, 0x35, 0x55, 0x62, 0xc5, 0xa3,
- 0x50, 0x89, 0x3d, 0x07, 0x10, 0x86, 0x0d, 0xee, 0x4e, 0x57, 0x17, 0xcf, 0x21, 0xe3, 0x3c, 0x09,
- 0xd5, 0x6b, 0x02, 0x83, 0x35, 0x2a, 0xb4, 0x08, 0x13, 0xad, 0xc0, 0x8f, 0xb8, 0x46, 0x78, 0x91,
- 0x7b, 0x9c, 0xf6, 0x9b, 0xb1, 0x5b, 0x2a, 0x09, 0x3c, 0x4e, 0x95, 0x40, 0x2f, 0xc2, 0xb0, 0x88,
- 0xe7, 0x52, 0xf1, 0xfd, 0x86, 0x50, 0x42, 0x29, 0x27, 0xcc, 0x6a, 0x8c, 0xc2, 0x3a, 0x9d, 0x56,
- 0x8c, 0xa9, 0x99, 0x07, 0x33, 0x8b, 0x71, 0x55, 0xb3, 0x46, 0x97, 0x08, 0xf8, 0x3b, 0xd4, 0x53,
- 0xc0, 0xdf, 0x58, 0x2d, 0x57, 0xea, 0xd9, 0xea, 0x09, 0x5d, 0x15, 0x59, 0x5f, 0xe9, 0x83, 0x29,
- 0x31, 0x71, 0x1e, 0xf4, 0x74, 0xb9, 0x91, 0x9e, 0x2e, 0x47, 0xa1, 0xb8, 0x7b, 0x77, 0xce, 0x1c,
- 0xf7, 0x9c, 0xf9, 0x11, 0x0b, 0x4c, 0x49, 0x0d, 0xfd, 0x7f, 0xb9, 0x99, 0xc7, 0x5e, 0xcc, 0x95,
- 0xfc, 0x94, 0xc3, 0xe1, 0xdb, 0xcc, 0x41, 0x66, 0xff, 0x3b, 0x0b, 0x1e, 0xed, 0xca, 0x11, 0x2d,
- 0x41, 0x89, 0x89, 0x93, 0xda, 0x45, 0xef, 0x49, 0xe5, 0x91, 0x2e, 0x11, 0x39, 0xd2, 0x6d, 0x5c,
- 0x12, 0x2d, 0xa5, 0x52, 0xbc, 0x3d, 0x95, 0x91, 0xe2, 0xed, 0xa4, 0xd1, 0x3d, 0xf7, 0x99, 0xe3,
- 0xed, 0x87, 0xe8, 0x89, 0x63, 0xbc, 0x06, 0x43, 0xef, 0x37, 0x94, 0x8e, 0x76, 0x42, 0xe9, 0x88,
- 0x4c, 0x6a, 0xed, 0x0c, 0xf9, 0x08, 0x4c, 0xb0, 0x40, 0x6f, 0xec, 0x7d, 0x84, 0x78, 0xa7, 0x56,
- 0x88, 0x7d, 0xa0, 0xaf, 0x25, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x51, 0x11, 0x06, 0xf8, 0xf2, 0x3b,
- 0x86, 0xeb, 0xe5, 0xd3, 0x50, 0x72, 0x9b, 0xcd, 0x36, 0xcf, 0xda, 0xd5, 0x1f, 0x7b, 0xd4, 0xae,
- 0x48, 0x20, 0x8e, 0xf1, 0x68, 0x59, 0xe8, 0xbb, 0x3b, 0xc4, 0x92, 0xe5, 0x0d, 0x9f, 0x5d, 0x74,
- 0x22, 0x87, 0xcb, 0x4a, 0xea, 0x9c, 0x8d, 0x35, 0xe3, 0xe8, 0x93, 0x00, 0x61, 0x14, 0xb8, 0xde,
- 0x16, 0x85, 0x89, 0x10, 0xd6, 0xef, 0xed, 0xc0, 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, 0xcf, 0x51,
- 0x08, 0xac, 0x71, 0x44, 0xb3, 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, 0x31, 0x9b,
- 0xf9, 0x00, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, 0x27, 0xda,
- 0x76, 0x28, 0xe5, 0xd9, 0x2f, 0x5a, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x8a, 0xd3, 0xe0, 0x2e,
- 0x9c, 0x68, 0x64, 0xec, 0xca, 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, 0x8b, 0x33,
- 0xeb, 0x40, 0x17, 0xe8, 0x8a, 0xa3, 0xbb, 0xae, 0xd3, 0x10, 0xcf, 0xf2, 0x47, 0xf8, 0x6a, 0xe3,
- 0x30, 0xac, 0xb0, 0xf6, 0xef, 0x59, 0x30, 0xc9, 0x5b, 0x7e, 0x95, 0xec, 0xa9, 0xbd, 0xe9, 0x5b,
- 0xd9, 0x76, 0x91, 0x2f, 0xb2, 0x90, 0x93, 0x2f, 0x52, 0xff, 0xb4, 0x62, 0xc7, 0x4f, 0xfb, 0xb2,
- 0x05, 0x62, 0x86, 0x1c, 0x83, 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, 0xc8, 0x51,
- 0x64, 0xfc, 0x99, 0x05, 0x13, 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0xa1, 0x97, 0xac, 0xf2,
- 0x57, 0xc9, 0xde, 0xba, 0x5f, 0x71, 0xa2, 0xed, 0xec, 0x8f, 0x32, 0x06, 0xab, 0xaf, 0xe3, 0x60,
- 0xd5, 0xe5, 0x02, 0x32, 0xd2, 0x29, 0x75, 0x79, 0x5c, 0x7f, 0xd8, 0x74, 0x4a, 0xf6, 0x37, 0x2d,
- 0x40, 0xbc, 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, 0x5d, 0xbc, 0x35, 0x29, 0x0c,
- 0xd6, 0xa8, 0x8e, 0xa4, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, 0x17, 0x87, 0xe8, 0xd1, 0x7f,
- 0x31, 0x00, 0xc9, 0x17, 0x71, 0xe8, 0x26, 0x8c, 0xd4, 0x9c, 0x96, 0xb3, 0xe1, 0x36, 0xdc, 0xc8,
- 0x25, 0x61, 0x27, 0x6f, 0xac, 0x05, 0x8d, 0x4e, 0x98, 0xc8, 0x35, 0x08, 0x36, 0xf8, 0xa0, 0x59,
- 0x80, 0x56, 0xe0, 0xee, 0xba, 0x0d, 0xb2, 0xc5, 0xd4, 0x2e, 0x2c, 0x10, 0x08, 0x77, 0x0d, 0x93,
- 0x50, 0xac, 0x51, 0x64, 0x84, 0x1f, 0x28, 0x3e, 0xe0, 0xf0, 0x03, 0x70, 0x6c, 0xe1, 0x07, 0xfa,
- 0x0e, 0x15, 0x7e, 0x60, 0xe8, 0xd0, 0xe1, 0x07, 0xfa, 0x7b, 0x0a, 0x3f, 0x80, 0xe1, 0x94, 0x94,
- 0x3d, 0xe9, 0xff, 0x65, 0xb7, 0x41, 0xc4, 0x85, 0x83, 0x47, 0x2f, 0x99, 0xb9, 0xb7, 0x5f, 0x3e,
- 0x85, 0x33, 0x29, 0x70, 0x4e, 0x49, 0xf4, 0x51, 0x98, 0x76, 0x1a, 0x0d, 0xff, 0xb6, 0x1a, 0xd4,
- 0xa5, 0xb0, 0xe6, 0x34, 0xb8, 0x09, 0x64, 0x90, 0x71, 0x7d, 0xe4, 0xde, 0x7e, 0x79, 0x7a, 0x2e,
- 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x82, 0x52, 0x2b, 0xf0, 0x6b, 0xab, 0xda, 0xb3, 0xdd, 0xb3,
- 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x55, 0x7f, 0xd8, 0x81, 0x1f, 0x17, 0xc8, 0x88,
- 0x27, 0x30, 0x7c, 0xa4, 0xf1, 0x04, 0x76, 0x60, 0xaa, 0x4a, 0x02, 0xd7, 0x69, 0xb8, 0x77, 0xa9,
- 0xbc, 0x2c, 0xf7, 0xa7, 0x75, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xa7, 0xf8, 0xae, 0x5a, 0x5e, 0x1b,
- 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x65, 0xc1, 0xa0, 0x78, 0x01, 0x77, 0x0c, 0x52, 0xe3, 0x9c,
- 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x25, 0x61, 0x8e, 0x78, 0xb4,
- 0x13, 0x93, 0xce, 0x86, 0x88, 0xbf, 0x5e, 0xa4, 0xd2, 0xbb, 0xf1, 0x16, 0xfb, 0xc1, 0x77, 0xc1,
- 0x1a, 0x0c, 0x86, 0xe2, 0x2d, 0x70, 0x21, 0xff, 0x31, 0x46, 0x72, 0x10, 0x63, 0x2f, 0x3a, 0xf1,
- 0xfa, 0x57, 0x32, 0xc9, 0x7c, 0x64, 0x5c, 0x7c, 0x80, 0x8f, 0x8c, 0xbb, 0xbd, 0x56, 0xef, 0x3b,
- 0x8a, 0xd7, 0xea, 0xf6, 0xd7, 0xd8, 0xc9, 0xa9, 0xc3, 0x8f, 0x41, 0xa8, 0xba, 0x6c, 0x9e, 0xb1,
- 0x76, 0x87, 0x99, 0x25, 0x1a, 0x95, 0x23, 0x5c, 0xfd, 0xbc, 0x05, 0x67, 0x32, 0xbe, 0x4a, 0x93,
- 0xb4, 0x9e, 0x81, 0x21, 0xa7, 0x5d, 0x77, 0xd5, 0x5a, 0xd6, 0x4c, 0x93, 0x73, 0x02, 0x8e, 0x15,
- 0x05, 0x5a, 0x80, 0x49, 0x72, 0xa7, 0xe5, 0x72, 0x43, 0xae, 0xee, 0x7c, 0x5c, 0xe4, 0xcf, 0x26,
- 0x97, 0x92, 0x48, 0x9c, 0xa6, 0x57, 0x71, 0x8d, 0x8a, 0xb9, 0x71, 0x8d, 0xfe, 0x8e, 0x05, 0xc3,
- 0xea, 0x35, 0xec, 0x03, 0xef, 0xed, 0x8f, 0x98, 0xbd, 0xfd, 0x70, 0x87, 0xde, 0xce, 0xe9, 0xe6,
- 0xdf, 0x29, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x41, 0x82, 0xbb, 0xff, 0x87, 0x13, 0x97, 0x60,
- 0xd8, 0x69, 0xb5, 0x24, 0x42, 0x7a, 0xc0, 0xb1, 0x68, 0xdd, 0x31, 0x18, 0xeb, 0x34, 0xea, 0x1d,
- 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x2d, 0x12, 0x51, 0x98, 0x70, 0xd8, 0xcd,
- 0xdf, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, 0xd9, 0x15, 0x2f, 0xba, 0x1e,
- 0xf0, 0x2b, 0xa4, 0x16, 0x19, 0x4c, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xf9, 0x81, 0xd5, 0xd1, 0x6f,
- 0xba, 0x52, 0xac, 0x09, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd8, 0xe9, 0xc3, 0xfa, 0xf4, 0x70, 0x51,
- 0xb1, 0x7e, 0x6a, 0x44, 0x8d, 0x06, 0x33, 0x8a, 0x2e, 0xea, 0xb1, 0xb7, 0x3a, 0x6f, 0xf6, 0xb4,
- 0x62, 0xfd, 0x41, 0x62, 0x1c, 0xa0, 0x0b, 0x7d, 0x3c, 0xe5, 0x1e, 0xf3, 0x6c, 0x97, 0x53, 0xe3,
- 0x10, 0x0e, 0x31, 0x2c, 0x75, 0x0f, 0x4b, 0x6c, 0xb2, 0x52, 0x11, 0xeb, 0x42, 0x4b, 0xdd, 0x23,
- 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa3, 0x38, 0x84, 0xad, 0xa2, 0x0e, 0xb1,
- 0x46, 0x81, 0x2e, 0x0a, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x9c, 0x50, 0x28, 0xc8, 0xee, 0xd2, 0xb4,
- 0x40, 0x97, 0x60, 0x58, 0x25, 0x6a, 0xaf, 0xf0, 0xa4, 0x59, 0x62, 0x9a, 0x2d, 0xc5, 0x60, 0xac,
- 0xd3, 0xa0, 0x75, 0x18, 0x0f, 0xb9, 0x9e, 0x4d, 0xc5, 0x15, 0xe7, 0xfa, 0xca, 0xf7, 0xaa, 0x77,
- 0xc8, 0x26, 0xfa, 0x80, 0x81, 0xf8, 0xee, 0x24, 0xa3, 0x33, 0x24, 0x59, 0xa0, 0x57, 0x61, 0xac,
- 0xe1, 0x3b, 0xf5, 0x79, 0xa7, 0xe1, 0x78, 0x35, 0xd6, 0x3f, 0x43, 0x66, 0xbe, 0xdf, 0x6b, 0x06,
- 0x16, 0x27, 0xa8, 0xa9, 0xf0, 0xa6, 0x43, 0x44, 0x74, 0x31, 0xc7, 0xdb, 0x22, 0xa1, 0x48, 0xbb,
- 0xcd, 0x84, 0xb7, 0x6b, 0x39, 0x34, 0x38, 0xb7, 0x34, 0x7a, 0x09, 0x46, 0xe4, 0xe7, 0x6b, 0xc1,
- 0x4c, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28, 0x84, 0x93, 0xf2, 0xff, 0x7a, 0xe0, 0x6c,
- 0x6e, 0xba, 0x35, 0xf1, 0xc2, 0x9f, 0x3f, 0xbb, 0xfd, 0xb0, 0x7c, 0x1b, 0xba, 0x94, 0x45, 0x74,
- 0xb0, 0x5f, 0x7e, 0x44, 0xf4, 0x5a, 0x26, 0x1e, 0x67, 0xf3, 0x46, 0xab, 0x30, 0xb5, 0x4d, 0x9c,
- 0x46, 0xb4, 0xbd, 0xb0, 0x4d, 0x6a, 0x3b, 0x72, 0xc1, 0xb1, 0xf0, 0x28, 0xda, 0xd3, 0x91, 0x2b,
- 0x69, 0x12, 0x9c, 0x55, 0x0e, 0xbd, 0x09, 0xd3, 0xad, 0xf6, 0x46, 0xc3, 0x0d, 0xb7, 0xd7, 0xfc,
- 0x88, 0x39, 0x21, 0xa9, 0x9c, 0xef, 0x22, 0x8e, 0x8a, 0x0a, 0x40, 0x53, 0xc9, 0xa1, 0xc3, 0xb9,
- 0x1c, 0xd0, 0x5d, 0x38, 0x99, 0x98, 0x08, 0x22, 0x92, 0xc4, 0x58, 0x7e, 0x56, 0x91, 0x6a, 0x56,
- 0x01, 0x11, 0x94, 0x25, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xad, 0x65, 0xa7, 0xe9,
- 0x36, 0xe8, 0x55, 0x71, 0x8a, 0xcd, 0x11, 0x7a, 0x6d, 0x80, 0x95, 0x8a, 0x84, 0xd2, 0xbd, 0x59,
- 0xfc, 0xdb, 0xc3, 0x1a, 0x35, 0xba, 0x06, 0x63, 0xe2, 0xdf, 0x9e, 0x18, 0x52, 0x1e, 0xd0, 0xe4,
- 0x71, 0x16, 0x8d, 0xaa, 0xa2, 0x63, 0x0e, 0x52, 0x10, 0x9c, 0x28, 0x8b, 0xb6, 0xe0, 0x8c, 0xcc,
- 0x10, 0xa7, 0xcf, 0x4f, 0x39, 0x06, 0x21, 0x4b, 0xe5, 0x31, 0xc4, 0x5f, 0xa5, 0xcc, 0x75, 0x22,
- 0xc4, 0x9d, 0xf9, 0xd0, 0x73, 0x5d, 0x9f, 0xe6, 0xfc, 0xc9, 0xef, 0x49, 0xee, 0xe1, 0x44, 0xcf,
- 0xf5, 0x6b, 0x49, 0x24, 0x4e, 0xd3, 0x23, 0x1f, 0x4e, 0xba, 0x5e, 0xd6, 0xac, 0x3e, 0xc5, 0x18,
- 0x7d, 0x90, 0xbf, 0x76, 0xee, 0x3c, 0xa3, 0x33, 0xf1, 0x38, 0x9b, 0xef, 0xdb, 0xf3, 0xfb, 0xfb,
- 0x5d, 0x8b, 0x96, 0xd6, 0xa4, 0x73, 0xf4, 0x29, 0x18, 0xd1, 0x3f, 0x4a, 0x48, 0x1a, 0xe7, 0xb3,
- 0x85, 0x57, 0x6d, 0x4f, 0xe0, 0xb2, 0xbd, 0x5a, 0xf7, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0x23,
- 0x26, 0xc0, 0xc5, 0xde, 0x24, 0x99, 0xde, 0xdd, 0xde, 0x08, 0x64, 0x4f, 0x77, 0x74, 0x0d, 0x86,
- 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x4a, 0xa5, 0x53, 0xd4, 0xc3, 0x05, 0x41, 0x23, 0xd6, 0x8f, 0xc8,
- 0xca, 0xc1, 0x61, 0x58, 0x71, 0xb0, 0x7f, 0xa3, 0x00, 0xe5, 0x2e, 0x29, 0x5e, 0x12, 0x66, 0x28,
- 0xab, 0x27, 0x33, 0xd4, 0x1c, 0x8c, 0xc7, 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x69, 0xa2,
- 0x71, 0x92, 0xbe, 0xe7, 0x47, 0x09, 0xba, 0x25, 0xab, 0xaf, 0xeb, 0xb3, 0x1a, 0xc3, 0x82, 0xdd,
- 0xdf, 0xfb, 0xb5, 0x37, 0xd7, 0x1a, 0x69, 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xe7, 0x76,
- 0xdc, 0x8d, 0x74, 0xc7, 0x1d, 0x81, 0x2d, 0xd7, 0xbe, 0x0e, 0x03, 0x3c, 0x8c, 0x63, 0x0f, 0xe2,
- 0xf6, 0x63, 0x66, 0x70, 0x67, 0x25, 0xe1, 0x19, 0x01, 0x9e, 0x7f, 0xc0, 0x82, 0xf1, 0xc4, 0xeb,
- 0x36, 0x84, 0xb5, 0x27, 0xd0, 0xf7, 0x23, 0x12, 0x67, 0x09, 0xdb, 0xe7, 0xa0, 0x6f, 0xdb, 0x0f,
- 0xa3, 0xa4, 0xa3, 0xc7, 0x15, 0x3f, 0x8c, 0x30, 0xc3, 0xd8, 0xbf, 0x6f, 0x41, 0xff, 0xba, 0xe3,
- 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46, 0x81, 0x5e, 0xbe, 0x0b, 0xbd, 0x08, 0x03, 0x64, 0x73,
- 0x93, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0xf4, 0xc4, 0xc0, 0x12, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6,
- 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x41, 0x29, 0x72, 0x9b, 0x64, 0xae, 0x5e, 0x17, 0xa6, 0xf2, 0xfb,
- 0x08, 0x9f, 0xb1, 0x2e, 0x19, 0xe0, 0x98, 0x97, 0xfd, 0x85, 0x02, 0x40, 0x1c, 0xff, 0xaa, 0xdb,
- 0x27, 0xce, 0xa7, 0x8c, 0xa8, 0xe7, 0x33, 0x8c, 0xa8, 0x28, 0x66, 0x98, 0x61, 0x41, 0x55, 0xdd,
- 0x54, 0xec, 0xa9, 0x9b, 0xfa, 0x0e, 0xd3, 0x4d, 0x0b, 0x30, 0x19, 0xc7, 0xef, 0x32, 0xc3, 0x17,
- 0xb2, 0xa3, 0x73, 0x3d, 0x89, 0xc4, 0x69, 0x7a, 0x9b, 0xc0, 0x39, 0x15, 0xc6, 0x48, 0x9c, 0x68,
- 0xcc, 0x0f, 0x5c, 0x37, 0x4a, 0x77, 0xe9, 0xa7, 0xd8, 0x4a, 0x5c, 0xc8, 0xb5, 0x12, 0xff, 0xa4,
- 0x05, 0x27, 0x92, 0xf5, 0xb0, 0x47, 0xd3, 0x9f, 0xb7, 0xe0, 0x24, 0xb3, 0x95, 0xb3, 0x5a, 0xd3,
- 0x96, 0xf9, 0x17, 0x3a, 0x86, 0x66, 0xca, 0x69, 0x71, 0x1c, 0xe3, 0x64, 0x35, 0x8b, 0x35, 0xce,
- 0xae, 0xd1, 0xfe, 0x9f, 0x7d, 0x30, 0x9d, 0x17, 0xd3, 0x89, 0x3d, 0x13, 0x71, 0xee, 0x54, 0x77,
- 0xc8, 0x6d, 0xe1, 0x8c, 0x1f, 0x3f, 0x13, 0xe1, 0x60, 0x2c, 0xf1, 0xc9, 0xac, 0x1d, 0x85, 0x1e,
- 0xb3, 0x76, 0x6c, 0xc3, 0xe4, 0xed, 0x6d, 0xe2, 0xdd, 0xf0, 0x42, 0x27, 0x72, 0xc3, 0x4d, 0x97,
- 0xd9, 0x95, 0xf9, 0xbc, 0x91, 0xa9, 0x7e, 0x27, 0x6f, 0x25, 0x09, 0x0e, 0xf6, 0xcb, 0x67, 0x0c,
- 0x40, 0xdc, 0x64, 0xbe, 0x91, 0xe0, 0x34, 0xd3, 0x74, 0xd2, 0x93, 0xbe, 0x07, 0x9c, 0xf4, 0xa4,
- 0xe9, 0x0a, 0x6f, 0x14, 0xf9, 0x06, 0x80, 0xdd, 0x18, 0x57, 0x15, 0x14, 0x6b, 0x14, 0xe8, 0x13,
- 0x80, 0xf4, 0xa4, 0x4e, 0x46, 0x48, 0xcd, 0x67, 0xef, 0xed, 0x97, 0xd1, 0x5a, 0x0a, 0x7b, 0xb0,
- 0x5f, 0x9e, 0xa2, 0xd0, 0x15, 0x8f, 0xde, 0x3c, 0xe3, 0x38, 0x64, 0x19, 0x8c, 0xd0, 0x2d, 0x98,
- 0xa0, 0x50, 0xb6, 0xa2, 0x64, 0xbc, 0x4e, 0x7e, 0x5b, 0x7c, 0xfa, 0xde, 0x7e, 0x79, 0x62, 0x2d,
- 0x81, 0xcb, 0x63, 0x9d, 0x62, 0x82, 0x5e, 0x86, 0xb1, 0x78, 0x5e, 0x5d, 0x25, 0x7b, 0x3c, 0x3e,
- 0x4e, 0x89, 0x2b, 0xbc, 0x57, 0x0d, 0x0c, 0x4e, 0x50, 0xda, 0x9f, 0xb7, 0xe0, 0x74, 0x6e, 0xe2,
- 0x71, 0x74, 0x01, 0x86, 0x9c, 0x96, 0xcb, 0xcd, 0x17, 0xe2, 0xa8, 0x61, 0x6a, 0xb2, 0xca, 0x0a,
- 0x37, 0x5e, 0x28, 0x2c, 0xdd, 0xe1, 0x77, 0x5c, 0xaf, 0x9e, 0xdc, 0xe1, 0xaf, 0xba, 0x5e, 0x1d,
- 0x33, 0x8c, 0x3a, 0xb2, 0x8a, 0xb9, 0x4f, 0x11, 0xbe, 0x42, 0xd7, 0x6a, 0x46, 0x8a, 0xf2, 0xe3,
- 0x6d, 0x06, 0x7a, 0x5a, 0x37, 0x35, 0x0a, 0xaf, 0xc2, 0x5c, 0x33, 0xe3, 0xf7, 0x5b, 0x20, 0x9e,
- 0x2e, 0xf7, 0x70, 0x26, 0xbf, 0x01, 0x23, 0xbb, 0xe9, 0x84, 0x77, 0xe7, 0xf2, 0xdf, 0x72, 0x8b,
- 0x40, 0xe1, 0x4a, 0xd0, 0x36, 0x92, 0xdb, 0x19, 0xbc, 0xec, 0x3a, 0x08, 0xec, 0x22, 0x61, 0x06,
- 0x85, 0xee, 0xad, 0x79, 0x0e, 0xa0, 0xce, 0x68, 0x59, 0x16, 0xdc, 0x82, 0x29, 0x71, 0x2d, 0x2a,
- 0x0c, 0xd6, 0xa8, 0xec, 0x7f, 0x55, 0x80, 0x61, 0x99, 0x60, 0xad, 0xed, 0xf5, 0xa2, 0xf6, 0x3b,
- 0x54, 0xc6, 0x65, 0x74, 0x11, 0x4a, 0x4c, 0x2f, 0x5d, 0x89, 0xb5, 0xa5, 0x4a, 0x2b, 0xb4, 0x2a,
- 0x11, 0x38, 0xa6, 0xa1, 0xbb, 0x63, 0xd8, 0xde, 0x60, 0xe4, 0x89, 0x87, 0xb6, 0x55, 0x0e, 0xc6,
- 0x12, 0x8f, 0x3e, 0x0a, 0x13, 0xbc, 0x5c, 0xe0, 0xb7, 0x9c, 0x2d, 0x6e, 0xcb, 0xea, 0x57, 0xd1,
- 0x4b, 0x26, 0x56, 0x13, 0xb8, 0x83, 0xfd, 0xf2, 0x89, 0x24, 0x8c, 0x19, 0x69, 0x53, 0x5c, 0x98,
- 0xcb, 0x1a, 0xaf, 0x84, 0xee, 0xea, 0x29, 0x4f, 0xb7, 0x18, 0x85, 0x75, 0x3a, 0xfb, 0x53, 0x80,
- 0xd2, 0xa9, 0xe6, 0xd0, 0x6b, 0xdc, 0xe5, 0xd9, 0x0d, 0x48, 0xbd, 0x93, 0xd1, 0x56, 0x8f, 0xd1,
- 0x21, 0xdf, 0xc8, 0xf1, 0x52, 0x58, 0x95, 0xb7, 0xff, 0x52, 0x11, 0x26, 0x92, 0x51, 0x01, 0xd0,
- 0x15, 0x18, 0xe0, 0x22, 0xa5, 0x60, 0xdf, 0xc1, 0x27, 0x48, 0x8b, 0x25, 0xc0, 0x0e, 0x57, 0x21,
- 0x95, 0x8a, 0xf2, 0xe8, 0x4d, 0x18, 0xae, 0xfb, 0xb7, 0xbd, 0xdb, 0x4e, 0x50, 0x9f, 0xab, 0xac,
- 0x88, 0xe9, 0x9c, 0xa9, 0xa8, 0x58, 0x8c, 0xc9, 0xf4, 0xf8, 0x04, 0xcc, 0xfe, 0x1d, 0xa3, 0xb0,
- 0xce, 0x0e, 0xad, 0xb3, 0xfc, 0x14, 0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xea, 0xf4, 0xfe, 0x65, 0x41,
- 0x12, 0x69, 0x9c, 0x47, 0x45, 0x12, 0x0b, 0x8e, 0xc0, 0x31, 0x23, 0xf4, 0x19, 0x98, 0x0a, 0x73,
- 0x4c, 0x27, 0x79, 0x99, 0x47, 0x3b, 0x59, 0x13, 0xe6, 0x1f, 0xba, 0xb7, 0x5f, 0x9e, 0xca, 0x32,
- 0xb2, 0x64, 0x55, 0x63, 0x7f, 0xf1, 0x04, 0x18, 0x8b, 0xd8, 0x48, 0x44, 0x6d, 0x1d, 0x51, 0x22,
- 0x6a, 0x0c, 0x43, 0xa4, 0xd9, 0x8a, 0xf6, 0x16, 0xdd, 0x40, 0x8c, 0x49, 0x26, 0xcf, 0x25, 0x41,
- 0x93, 0xe6, 0x29, 0x31, 0x58, 0xf1, 0xc9, 0xce, 0x16, 0x5e, 0xfc, 0x16, 0x66, 0x0b, 0xef, 0x3b,
- 0xc6, 0x6c, 0xe1, 0x6b, 0x30, 0xb8, 0xe5, 0x46, 0x98, 0xb4, 0x7c, 0x71, 0x99, 0xcb, 0x9c, 0x87,
- 0x97, 0x39, 0x49, 0x3a, 0x2f, 0xad, 0x40, 0x60, 0xc9, 0x04, 0xbd, 0xa6, 0x56, 0xe0, 0x40, 0xbe,
- 0xc2, 0x25, 0xed, 0xbc, 0x92, 0xb9, 0x06, 0x45, 0x4e, 0xf0, 0xc1, 0xfb, 0xcd, 0x09, 0xbe, 0x2c,
- 0x33, 0x79, 0x0f, 0xe5, 0x3f, 0x56, 0x63, 0x89, 0xba, 0xbb, 0xe4, 0xef, 0xbe, 0xa9, 0x67, 0x3f,
- 0x2f, 0xe5, 0xef, 0x04, 0x2a, 0xb1, 0x79, 0x8f, 0x39, 0xcf, 0xbf, 0xdf, 0x82, 0x93, 0xc9, 0xec,
- 0xa4, 0xec, 0x4d, 0x85, 0xf0, 0xf3, 0x78, 0xb1, 0x97, 0x74, 0xb1, 0xac, 0x80, 0x51, 0x21, 0xd3,
- 0x91, 0x66, 0x92, 0xe1, 0xec, 0xea, 0x68, 0x47, 0x07, 0x1b, 0x75, 0xe1, 0x6f, 0xf0, 0x58, 0x4e,
- 0xf2, 0xf4, 0x0e, 0x29, 0xd3, 0xd7, 0x33, 0x12, 0x75, 0x3f, 0x9e, 0x97, 0xa8, 0xbb, 0xe7, 0xf4,
- 0xdc, 0xaf, 0xa9, 0xb4, 0xe9, 0xa3, 0xf9, 0x53, 0x89, 0x27, 0x45, 0xef, 0x9a, 0x2c, 0xfd, 0x35,
- 0x95, 0x2c, 0xbd, 0x43, 0x44, 0x6e, 0x9e, 0x0a, 0xbd, 0x6b, 0x8a, 0x74, 0x2d, 0xcd, 0xf9, 0xf8,
- 0xd1, 0xa4, 0x39, 0x37, 0x8e, 0x1a, 0x9e, 0x69, 0xfb, 0xe9, 0x2e, 0x47, 0x8d, 0xc1, 0xb7, 0xf3,
- 0x61, 0xc3, 0x53, 0xba, 0x4f, 0xde, 0x57, 0x4a, 0xf7, 0x9b, 0x7a, 0x8a, 0x74, 0xd4, 0x25, 0x07,
- 0x38, 0x25, 0xea, 0x31, 0x31, 0xfa, 0x4d, 0xfd, 0x00, 0x9c, 0xca, 0xe7, 0xab, 0xce, 0xb9, 0x34,
- 0xdf, 0xcc, 0x23, 0x30, 0x95, 0x70, 0xfd, 0xc4, 0xf1, 0x24, 0x5c, 0x3f, 0x79, 0xe4, 0x09, 0xd7,
- 0x4f, 0x1d, 0x43, 0xc2, 0xf5, 0x87, 0x8e, 0x31, 0xe1, 0xfa, 0x4d, 0xe6, 0x1c, 0xc5, 0x03, 0x40,
- 0x89, 0x08, 0xe2, 0x4f, 0xe5, 0xc4, 0x4f, 0x4b, 0x47, 0x89, 0xe2, 0x1f, 0xa7, 0x50, 0x38, 0x66,
- 0x95, 0x91, 0xc8, 0x7d, 0xfa, 0x01, 0x24, 0x72, 0x5f, 0x8b, 0x13, 0xb9, 0x9f, 0xce, 0x1f, 0xea,
- 0x8c, 0xe7, 0x34, 0x39, 0xe9, 0xdb, 0x6f, 0xea, 0x69, 0xd7, 0x1f, 0xee, 0x60, 0x05, 0xcb, 0x52,
- 0x28, 0x77, 0x48, 0xb6, 0xfe, 0x2a, 0x4f, 0xb6, 0xfe, 0x48, 0xfe, 0x4e, 0x9e, 0x3c, 0xee, 0x8c,
- 0x14, 0xeb, 0xb4, 0x5d, 0x2a, 0xf6, 0x2a, 0x8b, 0x95, 0x9e, 0xd3, 0x2e, 0x15, 0xbc, 0x35, 0xdd,
- 0x2e, 0x85, 0xc2, 0x31, 0x2b, 0xfb, 0x07, 0x0b, 0x70, 0xb6, 0xf3, 0x7a, 0x8b, 0xb5, 0xe4, 0x95,
- 0xd8, 0x21, 0x20, 0xa1, 0x25, 0xe7, 0x77, 0xb6, 0x98, 0xaa, 0xe7, 0x78, 0x90, 0x97, 0x61, 0x52,
- 0xbd, 0xc3, 0x69, 0xb8, 0xb5, 0xbd, 0xb5, 0xf8, 0x9a, 0xac, 0x22, 0x27, 0x54, 0x93, 0x04, 0x38,
- 0x5d, 0x06, 0xcd, 0xc1, 0xb8, 0x01, 0x5c, 0x59, 0x14, 0x77, 0xb3, 0x38, 0x3a, 0xb7, 0x89, 0xc6,
- 0x49, 0x7a, 0xfb, 0x4b, 0x16, 0x3c, 0x94, 0x93, 0xa9, 0xb4, 0xe7, 0x70, 0x87, 0x9b, 0x30, 0xde,
- 0x32, 0x8b, 0x76, 0x89, 0xd0, 0x6a, 0xe4, 0x43, 0x55, 0x6d, 0x4d, 0x20, 0x70, 0x92, 0xa9, 0xfd,
- 0xb3, 0x05, 0x38, 0xd3, 0xd1, 0xb1, 0x14, 0x61, 0x38, 0xb5, 0xd5, 0x0c, 0x9d, 0x85, 0x80, 0xd4,
- 0x89, 0x17, 0xb9, 0x4e, 0xa3, 0xda, 0x22, 0x35, 0xcd, 0xce, 0xc1, 0x3c, 0x34, 0x2f, 0xaf, 0x56,
- 0xe7, 0xd2, 0x14, 0x38, 0xa7, 0x24, 0x5a, 0x06, 0x94, 0xc6, 0x88, 0x11, 0x66, 0x51, 0xf7, 0xd3,
- 0xfc, 0x70, 0x46, 0x09, 0xf4, 0x01, 0x18, 0x55, 0x0e, 0xab, 0xda, 0x88, 0xb3, 0x8d, 0x1d, 0xeb,
- 0x08, 0x6c, 0xd2, 0xa1, 0x4b, 0x3c, 0x6d, 0x83, 0x48, 0xf0, 0x21, 0x8c, 0x22, 0xe3, 0x32, 0x27,
- 0x83, 0x00, 0x63, 0x9d, 0x66, 0xfe, 0xa5, 0xdf, 0xfc, 0xc6, 0xd9, 0xf7, 0xfc, 0xf6, 0x37, 0xce,
- 0xbe, 0xe7, 0xf7, 0xbe, 0x71, 0xf6, 0x3d, 0xdf, 0x7d, 0xef, 0xac, 0xf5, 0x9b, 0xf7, 0xce, 0x5a,
- 0xbf, 0x7d, 0xef, 0xac, 0xf5, 0x7b, 0xf7, 0xce, 0x5a, 0x7f, 0x70, 0xef, 0xac, 0xf5, 0x85, 0x3f,
- 0x3c, 0xfb, 0x9e, 0x37, 0x50, 0x1c, 0x40, 0xf4, 0x22, 0x1d, 0x9d, 0x8b, 0xbb, 0x97, 0xfe, 0x5f,
- 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x6c, 0x51, 0x7f, 0x2c, 0x10, 0x01, 0x00,
+ // 14822 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x70, 0x24, 0xc9,
+ 0x75, 0x18, 0xcc, 0xea, 0xc6, 0xd5, 0x0f, 0x77, 0x62, 0x0e, 0x0c, 0x76, 0x66, 0x7a, 0xb6, 0x76,
+ 0x77, 0x76, 0xf6, 0xc2, 0x70, 0xf6, 0x20, 0x97, 0xbb, 0xe4, 0x8a, 0x38, 0x67, 0xb0, 0x03, 0x60,
+ 0x7a, 0xb3, 0x31, 0x33, 0xe4, 0x72, 0xc9, 0x60, 0xa1, 0x3b, 0x01, 0x14, 0xd1, 0xa8, 0xea, 0xad,
+ 0xaa, 0xc6, 0x0c, 0xe6, 0x23, 0x43, 0x12, 0xf5, 0xe9, 0xa0, 0xa4, 0xef, 0x0b, 0xc6, 0x17, 0xfa,
+ 0x8e, 0xa0, 0x14, 0x8a, 0x2f, 0x24, 0x59, 0x87, 0x69, 0xd9, 0xa6, 0x29, 0x4b, 0xb2, 0xa8, 0xcb,
+ 0x57, 0x58, 0x72, 0x38, 0x64, 0x59, 0x11, 0x16, 0x15, 0xa1, 0x30, 0x24, 0x8e, 0x1c, 0x21, 0x2b,
+ 0xc2, 0x96, 0xe4, 0xe3, 0x87, 0x0d, 0xcb, 0x96, 0x23, 0xcf, 0xca, 0xac, 0xa3, 0xbb, 0x31, 0x8b,
+ 0x01, 0x97, 0x8c, 0xfd, 0xd7, 0xfd, 0xde, 0xcb, 0x97, 0x59, 0x79, 0xbe, 0x7c, 0xef, 0xe5, 0x7b,
+ 0xf0, 0xea, 0xf6, 0xcb, 0xe1, 0xb4, 0xeb, 0x5f, 0xde, 0x6e, 0xad, 0x93, 0xc0, 0x23, 0x11, 0x09,
+ 0x2f, 0xef, 0x12, 0xaf, 0xee, 0x07, 0x97, 0x05, 0xc2, 0x69, 0xba, 0x97, 0x6b, 0x7e, 0x40, 0x2e,
+ 0xef, 0x5e, 0xb9, 0xbc, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0x4f, 0x37, 0x03, 0x3f, 0xf2, 0x11,
+ 0xe2, 0x34, 0xd3, 0x4e, 0xd3, 0x9d, 0xa6, 0x34, 0xd3, 0xbb, 0x57, 0xa6, 0x9e, 0xdb, 0x74, 0xa3,
+ 0xad, 0xd6, 0xfa, 0x74, 0xcd, 0xdf, 0xb9, 0xbc, 0xe9, 0x6f, 0xfa, 0x97, 0x19, 0xe9, 0x7a, 0x6b,
+ 0x83, 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0xce, 0x62, 0xea, 0xc5, 0xb8, 0x9a, 0x1d, 0xa7, 0xb6, 0xe5,
+ 0x7a, 0x24, 0xd8, 0xbb, 0xdc, 0xdc, 0xde, 0x64, 0xf5, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x8d, 0x24,
+ 0x2b, 0x6e, 0x5b, 0x2a, 0xbc, 0xbc, 0x43, 0x22, 0x27, 0xa3, 0xb9, 0x53, 0x97, 0xf3, 0x4a, 0x05,
+ 0x2d, 0x2f, 0x72, 0x77, 0xd2, 0xd5, 0x7c, 0xa0, 0x53, 0x81, 0xb0, 0xb6, 0x45, 0x76, 0x9c, 0x54,
+ 0xb9, 0x17, 0xf2, 0xca, 0xb5, 0x22, 0xb7, 0x71, 0xd9, 0xf5, 0xa2, 0x30, 0x0a, 0x92, 0x85, 0xec,
+ 0xaf, 0x5b, 0x70, 0x61, 0xe6, 0x76, 0x75, 0xa1, 0xe1, 0x84, 0x91, 0x5b, 0x9b, 0x6d, 0xf8, 0xb5,
+ 0xed, 0x6a, 0xe4, 0x07, 0xe4, 0x96, 0xdf, 0x68, 0xed, 0x90, 0x2a, 0xeb, 0x08, 0xf4, 0x2c, 0x0c,
+ 0xec, 0xb2, 0xff, 0x4b, 0xf3, 0x93, 0xd6, 0x05, 0xeb, 0x52, 0x69, 0x76, 0xec, 0xb7, 0xf6, 0xcb,
+ 0xef, 0xbb, 0xbf, 0x5f, 0x1e, 0xb8, 0x25, 0xe0, 0x58, 0x51, 0xa0, 0x8b, 0xd0, 0xb7, 0x11, 0xae,
+ 0xed, 0x35, 0xc9, 0x64, 0x81, 0xd1, 0x8e, 0x08, 0xda, 0xbe, 0xc5, 0x2a, 0x85, 0x62, 0x81, 0x45,
+ 0x97, 0xa1, 0xd4, 0x74, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0x2c, 0x5e, 0xb0, 0x2e, 0xf5, 0xce,
+ 0x8e, 0x0b, 0xd2, 0x52, 0x45, 0x22, 0x70, 0x4c, 0x43, 0x9b, 0x11, 0x10, 0xa7, 0x7e, 0xc3, 0x6b,
+ 0xec, 0x4d, 0xf6, 0x5c, 0xb0, 0x2e, 0x0d, 0xc4, 0xcd, 0xc0, 0x02, 0x8e, 0x15, 0x85, 0xfd, 0xa5,
+ 0x02, 0x0c, 0xcc, 0x6c, 0x6c, 0xb8, 0x9e, 0x1b, 0xed, 0xa1, 0x5b, 0x30, 0xe4, 0xf9, 0x75, 0x22,
+ 0xff, 0xb3, 0xaf, 0x18, 0x7c, 0xfe, 0xc2, 0x74, 0x7a, 0x2a, 0x4d, 0xaf, 0x6a, 0x74, 0xb3, 0x63,
+ 0xf7, 0xf7, 0xcb, 0x43, 0x3a, 0x04, 0x1b, 0x7c, 0x10, 0x86, 0xc1, 0xa6, 0x5f, 0x57, 0x6c, 0x0b,
+ 0x8c, 0x6d, 0x39, 0x8b, 0x6d, 0x25, 0x26, 0x9b, 0x1d, 0xbd, 0xbf, 0x5f, 0x1e, 0xd4, 0x00, 0x58,
+ 0x67, 0x82, 0xd6, 0x61, 0x94, 0xfe, 0xf5, 0x22, 0x57, 0xf1, 0x2d, 0x32, 0xbe, 0x8f, 0xe5, 0xf1,
+ 0xd5, 0x48, 0x67, 0x27, 0xee, 0xef, 0x97, 0x47, 0x13, 0x40, 0x9c, 0x64, 0x68, 0xdf, 0x83, 0x91,
+ 0x99, 0x28, 0x72, 0x6a, 0x5b, 0xa4, 0xce, 0x47, 0x10, 0xbd, 0x08, 0x3d, 0x9e, 0xb3, 0x43, 0xc4,
+ 0xf8, 0x5e, 0x10, 0x1d, 0xdb, 0xb3, 0xea, 0xec, 0x90, 0x83, 0xfd, 0xf2, 0xd8, 0x4d, 0xcf, 0x7d,
+ 0xbb, 0x25, 0x66, 0x05, 0x85, 0x61, 0x46, 0x8d, 0x9e, 0x07, 0xa8, 0x93, 0x5d, 0xb7, 0x46, 0x2a,
+ 0x4e, 0xb4, 0x25, 0xc6, 0x1b, 0x89, 0xb2, 0x30, 0xaf, 0x30, 0x58, 0xa3, 0xb2, 0xef, 0x42, 0x69,
+ 0x66, 0xd7, 0x77, 0xeb, 0x15, 0xbf, 0x1e, 0xa2, 0x6d, 0x18, 0x6d, 0x06, 0x64, 0x83, 0x04, 0x0a,
+ 0x34, 0x69, 0x5d, 0x28, 0x5e, 0x1a, 0x7c, 0xfe, 0x52, 0xe6, 0xc7, 0x9a, 0xa4, 0x0b, 0x5e, 0x14,
+ 0xec, 0xcd, 0x9e, 0x16, 0xf5, 0x8d, 0x26, 0xb0, 0x38, 0xc9, 0xd9, 0xfe, 0x27, 0x05, 0x38, 0x39,
+ 0x73, 0xaf, 0x15, 0x90, 0x79, 0x37, 0xdc, 0x4e, 0xce, 0xf0, 0xba, 0x1b, 0x6e, 0xaf, 0xc6, 0x3d,
+ 0xa0, 0xa6, 0xd6, 0xbc, 0x80, 0x63, 0x45, 0x81, 0x9e, 0x83, 0x7e, 0xfa, 0xfb, 0x26, 0x5e, 0x12,
+ 0x9f, 0x3c, 0x21, 0x88, 0x07, 0xe7, 0x9d, 0xc8, 0x99, 0xe7, 0x28, 0x2c, 0x69, 0xd0, 0x0a, 0x0c,
+ 0xd6, 0xd8, 0x82, 0xdc, 0x5c, 0xf1, 0xeb, 0x84, 0x0d, 0x66, 0x69, 0xf6, 0x19, 0x4a, 0x3e, 0x17,
+ 0x83, 0x0f, 0xf6, 0xcb, 0x93, 0xbc, 0x6d, 0x82, 0x85, 0x86, 0xc3, 0x7a, 0x79, 0x64, 0xab, 0xf5,
+ 0xd5, 0xc3, 0x38, 0x41, 0xc6, 0xda, 0xba, 0xa4, 0x2d, 0x95, 0x5e, 0xb6, 0x54, 0x86, 0xb2, 0x97,
+ 0x09, 0xba, 0x02, 0x3d, 0xdb, 0xae, 0x57, 0x9f, 0xec, 0x63, 0xbc, 0xce, 0xd1, 0x31, 0xbf, 0xee,
+ 0x7a, 0xf5, 0x83, 0xfd, 0xf2, 0xb8, 0xd1, 0x1c, 0x0a, 0xc4, 0x8c, 0xd4, 0xfe, 0xcf, 0x16, 0x94,
+ 0x19, 0x6e, 0xd1, 0x6d, 0x90, 0x0a, 0x09, 0x42, 0x37, 0x8c, 0x88, 0x17, 0x19, 0x1d, 0xfa, 0x3c,
+ 0x40, 0x48, 0x6a, 0x01, 0x89, 0xb4, 0x2e, 0x55, 0x13, 0xa3, 0xaa, 0x30, 0x58, 0xa3, 0xa2, 0x1b,
+ 0x42, 0xb8, 0xe5, 0x04, 0x6c, 0x7e, 0x89, 0x8e, 0x55, 0x1b, 0x42, 0x55, 0x22, 0x70, 0x4c, 0x63,
+ 0x6c, 0x08, 0xc5, 0x4e, 0x1b, 0x02, 0xfa, 0x08, 0x8c, 0xc6, 0x95, 0x85, 0x4d, 0xa7, 0x26, 0x3b,
+ 0x90, 0x2d, 0x99, 0xaa, 0x89, 0xc2, 0x49, 0x5a, 0xfb, 0x6f, 0x5a, 0x62, 0xf2, 0xd0, 0xaf, 0x7e,
+ 0x97, 0x7f, 0xab, 0xfd, 0xcb, 0x16, 0xf4, 0xcf, 0xba, 0x5e, 0xdd, 0xf5, 0x36, 0xd1, 0xa7, 0x61,
+ 0x80, 0x9e, 0x4d, 0x75, 0x27, 0x72, 0xc4, 0xbe, 0xf7, 0x7e, 0x6d, 0x6d, 0xa9, 0xa3, 0x62, 0xba,
+ 0xb9, 0xbd, 0x49, 0x01, 0xe1, 0x34, 0xa5, 0xa6, 0xab, 0xed, 0xc6, 0xfa, 0x67, 0x48, 0x2d, 0x5a,
+ 0x21, 0x91, 0x13, 0x7f, 0x4e, 0x0c, 0xc3, 0x8a, 0x2b, 0xba, 0x0e, 0x7d, 0x91, 0x13, 0x6c, 0x92,
+ 0x48, 0x6c, 0x80, 0x99, 0x1b, 0x15, 0x2f, 0x89, 0xe9, 0x8a, 0x24, 0x5e, 0x8d, 0xc4, 0xc7, 0xc2,
+ 0x1a, 0x2b, 0x8a, 0x05, 0x0b, 0xfb, 0x7f, 0xf4, 0xc3, 0x99, 0xb9, 0xea, 0x52, 0xce, 0xbc, 0xba,
+ 0x08, 0x7d, 0xf5, 0xc0, 0xdd, 0x25, 0x81, 0xe8, 0x67, 0xc5, 0x65, 0x9e, 0x41, 0xb1, 0xc0, 0xa2,
+ 0x97, 0x61, 0x88, 0x1f, 0x48, 0xd7, 0x1c, 0xaf, 0xde, 0x90, 0x5d, 0x7c, 0x42, 0x50, 0x0f, 0xdd,
+ 0xd2, 0x70, 0xd8, 0xa0, 0x3c, 0xe4, 0xa4, 0xba, 0x98, 0x58, 0x8c, 0x79, 0x87, 0xdd, 0x17, 0x2c,
+ 0x18, 0xe3, 0xd5, 0xcc, 0x44, 0x51, 0xe0, 0xae, 0xb7, 0x22, 0x12, 0x4e, 0xf6, 0xb2, 0x9d, 0x6e,
+ 0x2e, 0xab, 0xb7, 0x72, 0x7b, 0x60, 0xfa, 0x56, 0x82, 0x0b, 0xdf, 0x04, 0x27, 0x45, 0xbd, 0x63,
+ 0x49, 0x34, 0x4e, 0x55, 0x8b, 0xbe, 0xc7, 0x82, 0xa9, 0x9a, 0xef, 0x45, 0x81, 0xdf, 0x68, 0x90,
+ 0xa0, 0xd2, 0x5a, 0x6f, 0xb8, 0xe1, 0x16, 0x9f, 0xa7, 0x98, 0x6c, 0xb0, 0x9d, 0x20, 0x67, 0x0c,
+ 0x15, 0x91, 0x18, 0xc3, 0xf3, 0xf7, 0xf7, 0xcb, 0x53, 0x73, 0xb9, 0xac, 0x70, 0x9b, 0x6a, 0xd0,
+ 0x36, 0x20, 0x7a, 0x94, 0x56, 0x23, 0x67, 0x93, 0xc4, 0x95, 0xf7, 0x77, 0x5f, 0xf9, 0xa9, 0xfb,
+ 0xfb, 0x65, 0xb4, 0x9a, 0x62, 0x81, 0x33, 0xd8, 0xa2, 0xb7, 0xe1, 0x04, 0x85, 0xa6, 0xbe, 0x75,
+ 0xa0, 0xfb, 0xea, 0x26, 0xef, 0xef, 0x97, 0x4f, 0xac, 0x66, 0x30, 0xc1, 0x99, 0xac, 0xd1, 0x77,
+ 0x59, 0x70, 0x26, 0xfe, 0xfc, 0x85, 0xbb, 0x4d, 0xc7, 0xab, 0xc7, 0x15, 0x97, 0xba, 0xaf, 0x98,
+ 0xee, 0xc9, 0x67, 0xe6, 0xf2, 0x38, 0xe1, 0xfc, 0x4a, 0x90, 0x07, 0x13, 0xb4, 0x69, 0xc9, 0xba,
+ 0xa1, 0xfb, 0xba, 0x4f, 0xdf, 0xdf, 0x2f, 0x4f, 0xac, 0xa6, 0x79, 0xe0, 0x2c, 0xc6, 0x53, 0x73,
+ 0x70, 0x32, 0x73, 0x76, 0xa2, 0x31, 0x28, 0x6e, 0x13, 0x2e, 0x75, 0x95, 0x30, 0xfd, 0x89, 0x4e,
+ 0x40, 0xef, 0xae, 0xd3, 0x68, 0x89, 0x85, 0x89, 0xf9, 0x9f, 0x57, 0x0a, 0x2f, 0x5b, 0xf6, 0x3f,
+ 0x2d, 0xc2, 0xe8, 0x5c, 0x75, 0xe9, 0x81, 0x56, 0xbd, 0x7e, 0xec, 0x15, 0xda, 0x1e, 0x7b, 0xf1,
+ 0x21, 0x5a, 0xcc, 0x3d, 0x44, 0xbf, 0x33, 0x63, 0xc9, 0xf6, 0xb0, 0x25, 0xfb, 0xa1, 0x9c, 0x25,
+ 0x7b, 0xc4, 0x0b, 0x75, 0x37, 0x67, 0xd6, 0xf6, 0xb2, 0x01, 0xcc, 0x94, 0x90, 0x96, 0xfd, 0x9a,
+ 0xd3, 0x48, 0x6e, 0xb5, 0x87, 0x9c, 0xba, 0x47, 0x33, 0x8e, 0x35, 0x18, 0x9a, 0x73, 0x9a, 0xce,
+ 0xba, 0xdb, 0x70, 0x23, 0x97, 0x84, 0xe8, 0x49, 0x28, 0x3a, 0xf5, 0x3a, 0x93, 0xee, 0x4a, 0xb3,
+ 0x27, 0xef, 0xef, 0x97, 0x8b, 0x33, 0x75, 0x2a, 0x66, 0x80, 0xa2, 0xda, 0xc3, 0x94, 0x02, 0x3d,
+ 0x0d, 0x3d, 0xf5, 0xc0, 0x6f, 0x4e, 0x16, 0x18, 0x25, 0x5d, 0xe5, 0x3d, 0xf3, 0x81, 0xdf, 0x4c,
+ 0x90, 0x32, 0x1a, 0xfb, 0x37, 0x0b, 0x70, 0x76, 0x8e, 0x34, 0xb7, 0x16, 0xab, 0x39, 0xe7, 0xc5,
+ 0x25, 0x18, 0xd8, 0xf1, 0x3d, 0x37, 0xf2, 0x83, 0x50, 0x54, 0xcd, 0x66, 0xc4, 0x8a, 0x80, 0x61,
+ 0x85, 0x45, 0x17, 0xa0, 0xa7, 0x19, 0x0b, 0xb1, 0x43, 0x52, 0x00, 0x66, 0xe2, 0x2b, 0xc3, 0x50,
+ 0x8a, 0x56, 0x48, 0x02, 0x31, 0x63, 0x14, 0xc5, 0xcd, 0x90, 0x04, 0x98, 0x61, 0x62, 0x49, 0x80,
+ 0xca, 0x08, 0xe2, 0x44, 0x48, 0x48, 0x02, 0x14, 0x83, 0x35, 0x2a, 0x54, 0x81, 0x52, 0x98, 0x18,
+ 0xd9, 0xae, 0x96, 0xe6, 0x30, 0x13, 0x15, 0xd4, 0x48, 0xc6, 0x4c, 0x8c, 0x13, 0xac, 0xaf, 0xa3,
+ 0xa8, 0xf0, 0xb5, 0x02, 0x20, 0xde, 0x85, 0xdf, 0x62, 0x1d, 0x77, 0x33, 0xdd, 0x71, 0xdd, 0x2f,
+ 0x89, 0xa3, 0xea, 0xbd, 0xff, 0x62, 0xc1, 0xd9, 0x39, 0xd7, 0xab, 0x93, 0x20, 0x67, 0x02, 0x3e,
+ 0x9c, 0xbb, 0xf3, 0xe1, 0x84, 0x14, 0x63, 0x8a, 0xf5, 0x1c, 0xc1, 0x14, 0xb3, 0xff, 0xc2, 0x02,
+ 0xc4, 0x3f, 0xfb, 0x5d, 0xf7, 0xb1, 0x37, 0xd3, 0x1f, 0x7b, 0x04, 0xd3, 0xc2, 0xfe, 0x3b, 0x16,
+ 0x0c, 0xce, 0x35, 0x1c, 0x77, 0x47, 0x7c, 0xea, 0x1c, 0x8c, 0x4b, 0x45, 0x11, 0x03, 0x6b, 0xb2,
+ 0x3f, 0xdd, 0xdc, 0xc6, 0x71, 0x12, 0x89, 0xd3, 0xf4, 0xe8, 0x13, 0x70, 0xc6, 0x00, 0xae, 0x91,
+ 0x9d, 0x66, 0xc3, 0x89, 0xf4, 0x5b, 0x01, 0x3b, 0xfd, 0x71, 0x1e, 0x11, 0xce, 0x2f, 0x6f, 0x2f,
+ 0xc3, 0xc8, 0x5c, 0xc3, 0x25, 0x5e, 0xb4, 0x54, 0x99, 0xf3, 0xbd, 0x0d, 0x77, 0x13, 0xbd, 0x02,
+ 0x23, 0x91, 0xbb, 0x43, 0xfc, 0x56, 0x54, 0x25, 0x35, 0xdf, 0x63, 0x77, 0x6d, 0xeb, 0x52, 0xef,
+ 0x2c, 0xba, 0xbf, 0x5f, 0x1e, 0x59, 0x33, 0x30, 0x38, 0x41, 0x69, 0xff, 0x21, 0x1d, 0x71, 0x7f,
+ 0xa7, 0xe9, 0x7b, 0xc4, 0x8b, 0xe6, 0x7c, 0xaf, 0xce, 0x75, 0x32, 0xaf, 0x40, 0x4f, 0x44, 0x47,
+ 0x90, 0x7f, 0xf9, 0x45, 0xb9, 0xb4, 0xe9, 0xb8, 0x1d, 0xec, 0x97, 0x4f, 0xa5, 0x4b, 0xb0, 0x91,
+ 0x65, 0x65, 0xd0, 0x87, 0xa0, 0x2f, 0x8c, 0x9c, 0xa8, 0x15, 0x8a, 0x4f, 0x7d, 0x54, 0x8e, 0x7f,
+ 0x95, 0x41, 0x0f, 0xf6, 0xcb, 0xa3, 0xaa, 0x18, 0x07, 0x61, 0x51, 0x00, 0x3d, 0x05, 0xfd, 0x3b,
+ 0x24, 0x0c, 0x9d, 0x4d, 0x79, 0x7e, 0x8f, 0x8a, 0xb2, 0xfd, 0x2b, 0x1c, 0x8c, 0x25, 0x1e, 0x3d,
+ 0x06, 0xbd, 0x24, 0x08, 0xfc, 0x40, 0xec, 0x2a, 0xc3, 0x82, 0xb0, 0x77, 0x81, 0x02, 0x31, 0xc7,
+ 0xd9, 0xff, 0xd2, 0x82, 0x51, 0xd5, 0x56, 0x5e, 0xd7, 0x31, 0xdc, 0x9b, 0xde, 0x04, 0xa8, 0xc9,
+ 0x0f, 0x0c, 0xd9, 0x79, 0x37, 0xf8, 0xfc, 0xc5, 0x4c, 0xd1, 0x22, 0xd5, 0x8d, 0x31, 0x67, 0x05,
+ 0x0a, 0xb1, 0xc6, 0xcd, 0xfe, 0x35, 0x0b, 0x26, 0x12, 0x5f, 0xb4, 0xec, 0x86, 0x11, 0x7a, 0x2b,
+ 0xf5, 0x55, 0xd3, 0xdd, 0x7d, 0x15, 0x2d, 0xcd, 0xbe, 0x49, 0x2d, 0x3e, 0x09, 0xd1, 0xbe, 0xe8,
+ 0x1a, 0xf4, 0xba, 0x11, 0xd9, 0x91, 0x1f, 0xf3, 0x58, 0xdb, 0x8f, 0xe1, 0xad, 0x8a, 0x47, 0x64,
+ 0x89, 0x96, 0xc4, 0x9c, 0x81, 0xfd, 0x9b, 0x45, 0x28, 0xf1, 0x69, 0xbb, 0xe2, 0x34, 0x8f, 0x61,
+ 0x2c, 0x9e, 0x81, 0x92, 0xbb, 0xb3, 0xd3, 0x8a, 0x9c, 0x75, 0x71, 0x00, 0x0d, 0xf0, 0xcd, 0x60,
+ 0x49, 0x02, 0x71, 0x8c, 0x47, 0x4b, 0xd0, 0xc3, 0x9a, 0xc2, 0xbf, 0xf2, 0xc9, 0xec, 0xaf, 0x14,
+ 0x6d, 0x9f, 0x9e, 0x77, 0x22, 0x87, 0xcb, 0x7e, 0xea, 0xe4, 0xa3, 0x20, 0xcc, 0x58, 0x20, 0x07,
+ 0x60, 0xdd, 0xf5, 0x9c, 0x60, 0x8f, 0xc2, 0x26, 0x8b, 0x8c, 0xe1, 0x73, 0xed, 0x19, 0xce, 0x2a,
+ 0x7a, 0xce, 0x56, 0x7d, 0x58, 0x8c, 0xc0, 0x1a, 0xd3, 0xa9, 0x0f, 0x42, 0x49, 0x11, 0x1f, 0x46,
+ 0x84, 0x9b, 0xfa, 0x08, 0x8c, 0x26, 0xea, 0xea, 0x54, 0x7c, 0x48, 0x97, 0x00, 0x7f, 0x85, 0x6d,
+ 0x19, 0xa2, 0xd5, 0x0b, 0xde, 0xae, 0xd8, 0x39, 0xef, 0xc1, 0x89, 0x46, 0xc6, 0xde, 0x2b, 0xc6,
+ 0xb5, 0xfb, 0xbd, 0xfa, 0xac, 0xf8, 0xec, 0x13, 0x59, 0x58, 0x9c, 0x59, 0x07, 0x95, 0x6a, 0xfc,
+ 0x26, 0x5d, 0x20, 0x4e, 0x43, 0xbf, 0x20, 0xdc, 0x10, 0x30, 0xac, 0xb0, 0x74, 0xbf, 0x3b, 0xa1,
+ 0x1a, 0x7f, 0x9d, 0xec, 0x55, 0x49, 0x83, 0xd4, 0x22, 0x3f, 0xf8, 0xa6, 0x36, 0xff, 0x1c, 0xef,
+ 0x7d, 0xbe, 0x5d, 0x0e, 0x0a, 0x06, 0xc5, 0xeb, 0x64, 0x8f, 0x0f, 0x85, 0xfe, 0x75, 0xc5, 0xb6,
+ 0x5f, 0xf7, 0x15, 0x0b, 0x86, 0xd5, 0xd7, 0x1d, 0xc3, 0xbe, 0x30, 0x6b, 0xee, 0x0b, 0xe7, 0xda,
+ 0x4e, 0xf0, 0x9c, 0x1d, 0xe1, 0x6b, 0x05, 0x38, 0xa3, 0x68, 0xe8, 0x6d, 0x86, 0xff, 0x11, 0xb3,
+ 0xea, 0x32, 0x94, 0x3c, 0xa5, 0xd7, 0xb3, 0x4c, 0x85, 0x5a, 0xac, 0xd5, 0x8b, 0x69, 0xa8, 0x50,
+ 0xea, 0xc5, 0xc7, 0xec, 0x90, 0xae, 0xf0, 0x16, 0xca, 0xed, 0x59, 0x28, 0xb6, 0xdc, 0xba, 0x38,
+ 0x60, 0xde, 0x2f, 0x7b, 0xfb, 0xe6, 0xd2, 0xfc, 0xc1, 0x7e, 0xf9, 0xd1, 0x3c, 0x63, 0x0b, 0x3d,
+ 0xd9, 0xc2, 0xe9, 0x9b, 0x4b, 0xf3, 0x98, 0x16, 0x46, 0x33, 0x30, 0x2a, 0x4f, 0xe8, 0x5b, 0x54,
+ 0x40, 0xf4, 0x3d, 0x71, 0x0e, 0x29, 0xad, 0x35, 0x36, 0xd1, 0x38, 0x49, 0x8f, 0xe6, 0x61, 0x6c,
+ 0xbb, 0xb5, 0x4e, 0x1a, 0x24, 0xe2, 0x1f, 0x7c, 0x9d, 0x70, 0x9d, 0x6e, 0x29, 0xbe, 0x4b, 0x5e,
+ 0x4f, 0xe0, 0x71, 0xaa, 0x84, 0xfd, 0xd7, 0xec, 0x3c, 0x10, 0xbd, 0x57, 0x09, 0x7c, 0x3a, 0xb1,
+ 0x28, 0xf7, 0x6f, 0xe6, 0x74, 0xee, 0x66, 0x56, 0x5c, 0x27, 0x7b, 0x6b, 0x3e, 0xbd, 0x4b, 0x64,
+ 0xcf, 0x0a, 0x63, 0xce, 0xf7, 0xb4, 0x9d, 0xf3, 0xbf, 0x50, 0x80, 0x93, 0xaa, 0x07, 0x0c, 0xb1,
+ 0xf5, 0x5b, 0xbd, 0x0f, 0xae, 0xc0, 0x60, 0x9d, 0x6c, 0x38, 0xad, 0x46, 0xa4, 0x0c, 0x0c, 0xbd,
+ 0xdc, 0xc8, 0x34, 0x1f, 0x83, 0xb1, 0x4e, 0x73, 0x88, 0x6e, 0xfb, 0xf9, 0x61, 0x76, 0x10, 0x47,
+ 0x0e, 0x9d, 0xe3, 0x6a, 0xd5, 0x58, 0xb9, 0xab, 0xe6, 0x31, 0xe8, 0x75, 0x77, 0xa8, 0x60, 0x56,
+ 0x30, 0xe5, 0xad, 0x25, 0x0a, 0xc4, 0x1c, 0x87, 0x9e, 0x80, 0xfe, 0x9a, 0xbf, 0xb3, 0xe3, 0x78,
+ 0x75, 0x76, 0xe4, 0x95, 0x66, 0x07, 0xa9, 0xec, 0x36, 0xc7, 0x41, 0x58, 0xe2, 0xd0, 0x59, 0xe8,
+ 0x71, 0x82, 0x4d, 0xae, 0x75, 0x29, 0xcd, 0x0e, 0xd0, 0x9a, 0x66, 0x82, 0xcd, 0x10, 0x33, 0x28,
+ 0xbd, 0x34, 0xde, 0xf1, 0x83, 0x6d, 0xd7, 0xdb, 0x9c, 0x77, 0x03, 0xb1, 0x24, 0xd4, 0x59, 0x78,
+ 0x5b, 0x61, 0xb0, 0x46, 0x85, 0x16, 0xa1, 0xb7, 0xe9, 0x07, 0x51, 0x38, 0xd9, 0xc7, 0xba, 0xfb,
+ 0xd1, 0x9c, 0x8d, 0x88, 0x7f, 0x6d, 0xc5, 0x0f, 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xe2,
+ 0x68, 0x19, 0xfa, 0x89, 0xb7, 0xbb, 0x18, 0xf8, 0x3b, 0x93, 0x13, 0xf9, 0x9c, 0x16, 0x38, 0x09,
+ 0x9f, 0x66, 0xb1, 0x8c, 0x2a, 0xc0, 0x58, 0xb2, 0x40, 0x1f, 0x82, 0x22, 0xf1, 0x76, 0x27, 0xfb,
+ 0x19, 0xa7, 0xa9, 0x1c, 0x4e, 0xb7, 0x9c, 0x20, 0xde, 0xf3, 0x17, 0xbc, 0x5d, 0x4c, 0xcb, 0xa0,
+ 0x8f, 0x43, 0x49, 0x6e, 0x18, 0xa1, 0x50, 0x67, 0x66, 0x4e, 0x58, 0xb9, 0xcd, 0x60, 0xf2, 0x76,
+ 0xcb, 0x0d, 0xc8, 0x0e, 0xf1, 0xa2, 0x30, 0xde, 0x21, 0x25, 0x36, 0xc4, 0x31, 0x37, 0x54, 0x83,
+ 0xa1, 0x80, 0x84, 0xee, 0x3d, 0x52, 0xf1, 0x1b, 0x6e, 0x6d, 0x6f, 0xf2, 0x34, 0x6b, 0xde, 0x53,
+ 0x6d, 0xbb, 0x0c, 0x6b, 0x05, 0x62, 0x75, 0xbb, 0x0e, 0xc5, 0x06, 0x53, 0xf4, 0x06, 0x0c, 0x07,
+ 0x24, 0x8c, 0x9c, 0x20, 0x12, 0xb5, 0x4c, 0x2a, 0xf3, 0xd8, 0x30, 0xd6, 0x11, 0xfc, 0x3a, 0x11,
+ 0x57, 0x13, 0x63, 0xb0, 0xc9, 0x01, 0x7d, 0x5c, 0xea, 0xfe, 0x57, 0xfc, 0x96, 0x17, 0x85, 0x93,
+ 0x25, 0xd6, 0xee, 0x4c, 0xab, 0xec, 0xad, 0x98, 0x2e, 0x69, 0x1c, 0xe0, 0x85, 0xb1, 0xc1, 0x0a,
+ 0x7d, 0x12, 0x86, 0xf9, 0x7f, 0x6e, 0xdb, 0x0c, 0x27, 0x4f, 0x32, 0xde, 0x17, 0xf2, 0x79, 0x73,
+ 0xc2, 0xd9, 0x93, 0x82, 0xf9, 0xb0, 0x0e, 0x0d, 0xb1, 0xc9, 0x0d, 0x61, 0x18, 0x6e, 0xb8, 0xbb,
+ 0xc4, 0x23, 0x61, 0x58, 0x09, 0xfc, 0x75, 0x22, 0x54, 0xb5, 0x67, 0xb2, 0x6d, 0xa1, 0xfe, 0x3a,
+ 0x99, 0x1d, 0xa7, 0x3c, 0x97, 0xf5, 0x32, 0xd8, 0x64, 0x81, 0x6e, 0xc2, 0x08, 0xbd, 0x1b, 0xbb,
+ 0x31, 0xd3, 0xc1, 0x4e, 0x4c, 0xd9, 0x7d, 0x10, 0x1b, 0x85, 0x70, 0x82, 0x09, 0xba, 0x01, 0x43,
+ 0xac, 0xcf, 0x5b, 0x4d, 0xce, 0xf4, 0x54, 0x27, 0xa6, 0xcc, 0x94, 0x5e, 0xd5, 0x8a, 0x60, 0x83,
+ 0x01, 0x7a, 0x1d, 0x4a, 0x0d, 0x77, 0x83, 0xd4, 0xf6, 0x6a, 0x0d, 0x32, 0x39, 0xc4, 0xb8, 0x65,
+ 0x6e, 0x86, 0xcb, 0x92, 0x88, 0xcb, 0xe7, 0xea, 0x2f, 0x8e, 0x8b, 0xa3, 0x5b, 0x70, 0x2a, 0x22,
+ 0xc1, 0x8e, 0xeb, 0x39, 0x74, 0x13, 0x13, 0x57, 0x42, 0x66, 0xa2, 0x1e, 0x66, 0xb3, 0xeb, 0xbc,
+ 0x18, 0x8d, 0x53, 0x6b, 0x99, 0x54, 0x38, 0xa7, 0x34, 0xba, 0x0b, 0x93, 0x19, 0x18, 0x3e, 0x6f,
+ 0x4f, 0x30, 0xce, 0x1f, 0x16, 0x9c, 0x27, 0xd7, 0x72, 0xe8, 0x0e, 0xda, 0xe0, 0x70, 0x2e, 0x77,
+ 0x74, 0x03, 0x46, 0xd9, 0xce, 0x59, 0x69, 0x35, 0x1a, 0xa2, 0xc2, 0x11, 0x56, 0xe1, 0x13, 0x52,
+ 0x8e, 0x58, 0x32, 0xd1, 0x07, 0xfb, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x69, 0xb4, 0xce, 0xac, 0xa1,
+ 0xad, 0xc0, 0x8d, 0xf6, 0xe8, 0xaa, 0x22, 0x77, 0xa3, 0xc9, 0xd1, 0xb6, 0x9a, 0x21, 0x9d, 0x54,
+ 0x99, 0x4c, 0x75, 0x20, 0x4e, 0x32, 0xa4, 0x47, 0x41, 0x18, 0xd5, 0x5d, 0x6f, 0x72, 0x8c, 0xdf,
+ 0xa7, 0xe4, 0x4e, 0x5a, 0xa5, 0x40, 0xcc, 0x71, 0xcc, 0x12, 0x4a, 0x7f, 0xdc, 0xa0, 0x27, 0xee,
+ 0x38, 0x23, 0x8c, 0x2d, 0xa1, 0x12, 0x81, 0x63, 0x1a, 0x2a, 0x04, 0x47, 0xd1, 0xde, 0x24, 0x62,
+ 0xa4, 0x6a, 0x43, 0x5c, 0x5b, 0xfb, 0x38, 0xa6, 0x70, 0x7b, 0x1d, 0x46, 0xd4, 0x36, 0xc1, 0xfa,
+ 0x04, 0x95, 0xa1, 0x97, 0x89, 0x7d, 0x42, 0x8f, 0x59, 0xa2, 0x4d, 0x60, 0x22, 0x21, 0xe6, 0x70,
+ 0xd6, 0x04, 0xf7, 0x1e, 0x99, 0xdd, 0x8b, 0x08, 0xd7, 0x45, 0x14, 0xb5, 0x26, 0x48, 0x04, 0x8e,
+ 0x69, 0xec, 0xff, 0xc9, 0xc5, 0xe7, 0xf8, 0x94, 0xe8, 0xe2, 0x5c, 0x7c, 0x16, 0x06, 0xb6, 0xfc,
+ 0x30, 0xa2, 0xd4, 0xac, 0x8e, 0xde, 0x58, 0x60, 0xbe, 0x26, 0xe0, 0x58, 0x51, 0xa0, 0x57, 0x61,
+ 0xb8, 0xa6, 0x57, 0x20, 0x0e, 0x75, 0xb5, 0x8d, 0x18, 0xb5, 0x63, 0x93, 0x16, 0xbd, 0x0c, 0x03,
+ 0xcc, 0xbb, 0xa7, 0xe6, 0x37, 0x84, 0xb4, 0x29, 0x25, 0x93, 0x81, 0x8a, 0x80, 0x1f, 0x68, 0xbf,
+ 0xb1, 0xa2, 0x46, 0x17, 0xa1, 0x8f, 0x36, 0x61, 0xa9, 0x22, 0x8e, 0x53, 0xa5, 0x92, 0xbb, 0xc6,
+ 0xa0, 0x58, 0x60, 0xed, 0x5f, 0xb3, 0x98, 0x2c, 0x95, 0xde, 0xf3, 0xd1, 0x35, 0x76, 0x68, 0xb0,
+ 0x13, 0x44, 0x53, 0x89, 0x3d, 0xae, 0x9d, 0x04, 0x0a, 0x77, 0x90, 0xf8, 0x8f, 0x8d, 0x92, 0xe8,
+ 0xcd, 0xe4, 0xc9, 0xc0, 0x05, 0x8a, 0x17, 0x65, 0x17, 0x24, 0x4f, 0x87, 0x47, 0xe2, 0x23, 0x8e,
+ 0xb6, 0xa7, 0xdd, 0x11, 0x61, 0xff, 0x5f, 0x05, 0x6d, 0x96, 0x54, 0x23, 0x27, 0x22, 0xa8, 0x02,
+ 0xfd, 0x77, 0x1c, 0x37, 0x72, 0xbd, 0x4d, 0x21, 0xf7, 0xb5, 0x3f, 0xe8, 0x58, 0xa1, 0xdb, 0xbc,
+ 0x00, 0x97, 0x5e, 0xc4, 0x1f, 0x2c, 0xd9, 0x50, 0x8e, 0x41, 0xcb, 0xf3, 0x28, 0xc7, 0x42, 0xb7,
+ 0x1c, 0x31, 0x2f, 0xc0, 0x39, 0x8a, 0x3f, 0x58, 0xb2, 0x41, 0x6f, 0x01, 0xc8, 0x1d, 0x82, 0xd4,
+ 0x85, 0x57, 0xd0, 0xb3, 0x9d, 0x99, 0xae, 0xa9, 0x32, 0xb3, 0x23, 0x54, 0x36, 0x8a, 0xff, 0x63,
+ 0x8d, 0x9f, 0x1d, 0x69, 0x63, 0xaa, 0x37, 0x06, 0x7d, 0x82, 0x2e, 0x51, 0x27, 0x88, 0x48, 0x7d,
+ 0x26, 0x12, 0x9d, 0xf3, 0x74, 0x77, 0x97, 0xc3, 0x35, 0x77, 0x87, 0xe8, 0xcb, 0x59, 0x30, 0xc1,
+ 0x31, 0x3f, 0xfb, 0x97, 0x8a, 0x30, 0x99, 0xd7, 0x5c, 0xba, 0x68, 0xc8, 0x5d, 0x37, 0x9a, 0xa3,
+ 0x62, 0xad, 0x65, 0x2e, 0x9a, 0x05, 0x01, 0xc7, 0x8a, 0x82, 0xce, 0xde, 0xd0, 0xdd, 0x94, 0x77,
+ 0xfb, 0xde, 0x78, 0xf6, 0x56, 0x19, 0x14, 0x0b, 0x2c, 0xa5, 0x0b, 0x88, 0x13, 0x0a, 0xb7, 0x33,
+ 0x6d, 0x96, 0x63, 0x06, 0xc5, 0x02, 0xab, 0x6b, 0x19, 0x7b, 0x3a, 0x68, 0x19, 0x8d, 0x2e, 0xea,
+ 0x3d, 0xda, 0x2e, 0x42, 0x9f, 0x02, 0xd8, 0x70, 0x3d, 0x37, 0xdc, 0x62, 0xdc, 0xfb, 0x0e, 0xcd,
+ 0x5d, 0x09, 0xc5, 0x8b, 0x8a, 0x0b, 0xd6, 0x38, 0xa2, 0x97, 0x60, 0x50, 0x6d, 0x20, 0x4b, 0xf3,
+ 0xcc, 0x06, 0xaf, 0xf9, 0x34, 0xc5, 0xbb, 0xe9, 0x3c, 0xd6, 0xe9, 0xec, 0xcf, 0x24, 0xe7, 0x8b,
+ 0x58, 0x01, 0x5a, 0xff, 0x5a, 0xdd, 0xf6, 0x6f, 0xa1, 0x7d, 0xff, 0xda, 0xdf, 0xe8, 0x83, 0x51,
+ 0xa3, 0xb2, 0x56, 0xd8, 0xc5, 0x9e, 0x7b, 0x95, 0x1e, 0x40, 0x4e, 0x44, 0xc4, 0xfa, 0xb3, 0x3b,
+ 0x2f, 0x15, 0xfd, 0x90, 0xa2, 0x2b, 0x80, 0x97, 0x47, 0x9f, 0x82, 0x52, 0xc3, 0x09, 0x99, 0xc6,
+ 0x92, 0x88, 0x75, 0xd7, 0x0d, 0xb3, 0xf8, 0x42, 0xe8, 0x84, 0x91, 0x76, 0xea, 0x73, 0xde, 0x31,
+ 0x4b, 0x7a, 0x52, 0x52, 0xf9, 0x4a, 0xfa, 0x35, 0xaa, 0x46, 0x50, 0x21, 0x6c, 0x0f, 0x73, 0x1c,
+ 0x7a, 0x99, 0x6d, 0xad, 0x74, 0x56, 0xcc, 0x51, 0x69, 0x94, 0x4d, 0xb3, 0x5e, 0x43, 0xc8, 0x56,
+ 0x38, 0x6c, 0x50, 0xc6, 0x77, 0xb2, 0xbe, 0x36, 0x77, 0xb2, 0xa7, 0xa0, 0x9f, 0xfd, 0x50, 0x33,
+ 0x40, 0x8d, 0xc6, 0x12, 0x07, 0x63, 0x89, 0x4f, 0x4e, 0x98, 0x81, 0xee, 0x26, 0x0c, 0xbd, 0xf5,
+ 0x89, 0x49, 0xcd, 0xfc, 0x1f, 0x06, 0xf8, 0x2e, 0x27, 0xa6, 0x3c, 0x96, 0x38, 0xf4, 0x33, 0x16,
+ 0x20, 0xa7, 0x41, 0x6f, 0xcb, 0x14, 0xac, 0x2e, 0x37, 0xc0, 0x44, 0xed, 0x57, 0x3b, 0x76, 0x7b,
+ 0x2b, 0x9c, 0x9e, 0x49, 0x95, 0xe6, 0x9a, 0xd2, 0x57, 0x44, 0x13, 0x51, 0x9a, 0x40, 0x3f, 0x8c,
+ 0x96, 0xdd, 0x30, 0xfa, 0xfc, 0x1f, 0x25, 0x0e, 0xa7, 0x8c, 0x26, 0xa1, 0x9b, 0xfa, 0xe5, 0x6b,
+ 0xf0, 0x90, 0x97, 0xaf, 0xe1, 0xbc, 0x8b, 0xd7, 0x54, 0x0b, 0x4e, 0xe7, 0x7c, 0x41, 0x86, 0xfe,
+ 0x75, 0x5e, 0xd7, 0xbf, 0x76, 0xd0, 0xda, 0x4d, 0xcb, 0x3a, 0xa6, 0xdf, 0x68, 0x39, 0x5e, 0xe4,
+ 0x46, 0x7b, 0xba, 0xbe, 0xf6, 0x69, 0x18, 0x99, 0x77, 0xc8, 0x8e, 0xef, 0x2d, 0x78, 0xf5, 0xa6,
+ 0xef, 0x7a, 0x11, 0x9a, 0x84, 0x1e, 0x26, 0x7c, 0xf0, 0xad, 0xb7, 0x87, 0xf6, 0x1e, 0x66, 0x10,
+ 0x7b, 0x13, 0x4e, 0xce, 0xfb, 0x77, 0xbc, 0x3b, 0x4e, 0x50, 0x9f, 0xa9, 0x2c, 0x69, 0xfa, 0xa4,
+ 0x55, 0xa9, 0xcf, 0xb0, 0xf2, 0x6f, 0x8b, 0x5a, 0x49, 0x7e, 0x1d, 0x5a, 0x74, 0x1b, 0x24, 0x47,
+ 0xeb, 0xf7, 0xff, 0x16, 0x8c, 0x9a, 0x62, 0x7a, 0x65, 0x77, 0xb6, 0x72, 0xed, 0xce, 0x6f, 0xc0,
+ 0xc0, 0x86, 0x4b, 0x1a, 0x75, 0x4c, 0x36, 0x44, 0xef, 0x3c, 0x99, 0xef, 0x99, 0xb6, 0x48, 0x29,
+ 0xa5, 0x96, 0x97, 0x6b, 0x43, 0x16, 0x45, 0x61, 0xac, 0xd8, 0xa0, 0x6d, 0x18, 0x93, 0x7d, 0x28,
+ 0xb1, 0x62, 0x3f, 0x78, 0xaa, 0xdd, 0xc0, 0x9b, 0xcc, 0x4f, 0xdc, 0xdf, 0x2f, 0x8f, 0xe1, 0x04,
+ 0x1b, 0x9c, 0x62, 0x8c, 0xce, 0x42, 0xcf, 0x0e, 0x3d, 0xf9, 0x7a, 0x58, 0xf7, 0x33, 0xf5, 0x07,
+ 0xd3, 0xe4, 0x30, 0xa8, 0xfd, 0x63, 0x16, 0x9c, 0x4e, 0xf5, 0x8c, 0xd0, 0x68, 0x1d, 0xf1, 0x28,
+ 0x24, 0x35, 0x4c, 0x85, 0xce, 0x1a, 0x26, 0xfb, 0x6f, 0x59, 0x70, 0x62, 0x61, 0xa7, 0x19, 0xed,
+ 0xcd, 0xbb, 0xa6, 0x91, 0xf8, 0x83, 0xd0, 0xb7, 0x43, 0xea, 0x6e, 0x6b, 0x47, 0x8c, 0x5c, 0x59,
+ 0x9e, 0x0e, 0x2b, 0x0c, 0x7a, 0xb0, 0x5f, 0x1e, 0xae, 0x46, 0x7e, 0xe0, 0x6c, 0x12, 0x0e, 0xc0,
+ 0x82, 0x9c, 0x9d, 0xb1, 0xee, 0x3d, 0xb2, 0xec, 0xee, 0xb8, 0xd1, 0x83, 0xcd, 0x76, 0x61, 0xdf,
+ 0x95, 0x4c, 0x70, 0xcc, 0xcf, 0xfe, 0xba, 0x05, 0xa3, 0x72, 0xde, 0xcf, 0xd4, 0xeb, 0x01, 0x09,
+ 0x43, 0x34, 0x05, 0x05, 0xb7, 0x29, 0x5a, 0x09, 0xa2, 0x95, 0x85, 0xa5, 0x0a, 0x2e, 0xb8, 0x4d,
+ 0x29, 0xce, 0xb3, 0x03, 0xa8, 0x68, 0x9a, 0xba, 0xaf, 0x09, 0x38, 0x56, 0x14, 0xe8, 0x12, 0x0c,
+ 0x78, 0x7e, 0x9d, 0x4b, 0xc4, 0x5c, 0x94, 0x60, 0x13, 0x6c, 0x55, 0xc0, 0xb0, 0xc2, 0xa2, 0x0a,
+ 0x94, 0xb8, 0x23, 0x64, 0x3c, 0x69, 0xbb, 0x72, 0xa7, 0x64, 0x5f, 0xb6, 0x26, 0x4b, 0xe2, 0x98,
+ 0x89, 0xfd, 0x1b, 0x16, 0x0c, 0xc9, 0x2f, 0xeb, 0xf2, 0xae, 0x42, 0x97, 0x56, 0x7c, 0x4f, 0x89,
+ 0x97, 0x16, 0xbd, 0x6b, 0x30, 0x8c, 0x71, 0xc5, 0x28, 0x1e, 0xea, 0x8a, 0x71, 0x05, 0x06, 0x9d,
+ 0x66, 0xb3, 0x62, 0xde, 0x4f, 0xd8, 0x54, 0x9a, 0x89, 0xc1, 0x58, 0xa7, 0xb1, 0x7f, 0xb4, 0x00,
+ 0x23, 0xf2, 0x0b, 0xaa, 0xad, 0xf5, 0x90, 0x44, 0x68, 0x0d, 0x4a, 0x0e, 0x1f, 0x25, 0x22, 0x27,
+ 0xf9, 0x63, 0xd9, 0x7a, 0x33, 0x63, 0x48, 0x63, 0x41, 0x6b, 0x46, 0x96, 0xc6, 0x31, 0x23, 0xd4,
+ 0x80, 0x71, 0xcf, 0x8f, 0xd8, 0xa1, 0xab, 0xf0, 0xed, 0x4c, 0x99, 0x49, 0xee, 0x67, 0x04, 0xf7,
+ 0xf1, 0xd5, 0x24, 0x17, 0x9c, 0x66, 0x8c, 0x16, 0xa4, 0x2e, 0xb2, 0x98, 0xaf, 0x44, 0xd2, 0x07,
+ 0x2e, 0x5b, 0x15, 0x69, 0xff, 0xaa, 0x05, 0x25, 0x49, 0x76, 0x1c, 0x56, 0xeb, 0x15, 0xe8, 0x0f,
+ 0xd9, 0x20, 0xc8, 0xae, 0xb1, 0xdb, 0x35, 0x9c, 0x8f, 0x57, 0x2c, 0x4b, 0xf0, 0xff, 0x21, 0x96,
+ 0x3c, 0x98, 0x29, 0x4a, 0x35, 0xff, 0x5d, 0x62, 0x8a, 0x52, 0xed, 0xc9, 0x39, 0x94, 0xfe, 0x94,
+ 0xb5, 0x59, 0xd3, 0xed, 0x52, 0x91, 0xb7, 0x19, 0x90, 0x0d, 0xf7, 0x6e, 0x52, 0xe4, 0xad, 0x30,
+ 0x28, 0x16, 0x58, 0xf4, 0x16, 0x0c, 0xd5, 0xa4, 0x0d, 0x22, 0x5e, 0xe1, 0x17, 0xdb, 0xda, 0xc3,
+ 0x94, 0xe9, 0x94, 0xeb, 0xd0, 0xe6, 0xb4, 0xf2, 0xd8, 0xe0, 0x66, 0x3a, 0xfa, 0x14, 0x3b, 0x39,
+ 0xfa, 0xc4, 0x7c, 0xf3, 0xdd, 0x5e, 0x7e, 0xdc, 0x82, 0x3e, 0xae, 0x7b, 0xee, 0x4e, 0xf5, 0xaf,
+ 0x59, 0x92, 0xe3, 0xbe, 0xbb, 0x45, 0x81, 0x42, 0xd2, 0x40, 0x2b, 0x50, 0x62, 0x3f, 0x98, 0xee,
+ 0xbc, 0x98, 0xff, 0x0e, 0x87, 0xd7, 0xaa, 0x37, 0xf0, 0x96, 0x2c, 0x86, 0x63, 0x0e, 0xf6, 0x8f,
+ 0x14, 0xe9, 0xee, 0x16, 0x93, 0x1a, 0x87, 0xbe, 0xf5, 0xf0, 0x0e, 0xfd, 0xc2, 0xc3, 0x3a, 0xf4,
+ 0x37, 0x61, 0xb4, 0xa6, 0xd9, 0x9d, 0xe3, 0x91, 0xbc, 0xd4, 0x76, 0x92, 0x68, 0x26, 0x6a, 0xae,
+ 0x9d, 0x9b, 0x33, 0x99, 0xe0, 0x24, 0x57, 0xf4, 0x09, 0x18, 0xe2, 0xe3, 0x2c, 0x6a, 0xe1, 0xbe,
+ 0x52, 0x4f, 0xe4, 0xcf, 0x17, 0xbd, 0x0a, 0xae, 0xcd, 0xd5, 0x8a, 0x63, 0x83, 0x99, 0xfd, 0x97,
+ 0x16, 0xa0, 0x85, 0xe6, 0x16, 0xd9, 0x21, 0x81, 0xd3, 0x88, 0xcd, 0x47, 0x3f, 0x68, 0xc1, 0x24,
+ 0x49, 0x81, 0xe7, 0xfc, 0x9d, 0x1d, 0x71, 0x59, 0xcc, 0xd1, 0x67, 0x2c, 0xe4, 0x94, 0x51, 0x0f,
+ 0x95, 0x26, 0xf3, 0x28, 0x70, 0x6e, 0x7d, 0x68, 0x05, 0x26, 0xf8, 0x29, 0xa9, 0x10, 0x9a, 0xdf,
+ 0xd5, 0x23, 0x82, 0xf1, 0xc4, 0x5a, 0x9a, 0x04, 0x67, 0x95, 0xb3, 0x7f, 0x75, 0x18, 0x72, 0x5b,
+ 0xf1, 0x9e, 0xdd, 0xec, 0x3d, 0xbb, 0xd9, 0x7b, 0x76, 0xb3, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd,
+ 0xde, 0xb3, 0x9b, 0xbd, 0x4b, 0xed, 0x66, 0xff, 0xb7, 0x05, 0x27, 0xd5, 0xf1, 0x65, 0x5c, 0xd8,
+ 0x3f, 0x0b, 0x13, 0x7c, 0xb9, 0x19, 0x3e, 0xc6, 0xe2, 0xb8, 0xbe, 0x92, 0x39, 0x73, 0x13, 0xbe,
+ 0xf0, 0x46, 0x41, 0xfe, 0xa8, 0x28, 0x03, 0x81, 0xb3, 0xaa, 0xb1, 0x7f, 0x69, 0x00, 0x7a, 0x17,
+ 0x76, 0x89, 0x17, 0x1d, 0xc3, 0xd5, 0xa6, 0x06, 0x23, 0xae, 0xb7, 0xeb, 0x37, 0x76, 0x49, 0x9d,
+ 0xe3, 0x0f, 0x73, 0x03, 0x3f, 0x25, 0x58, 0x8f, 0x2c, 0x19, 0x2c, 0x70, 0x82, 0xe5, 0xc3, 0xb0,
+ 0x3e, 0x5c, 0x85, 0x3e, 0x7e, 0xf8, 0x08, 0xd3, 0x43, 0xe6, 0x9e, 0xcd, 0x3a, 0x51, 0x1c, 0xa9,
+ 0xb1, 0x65, 0x84, 0x1f, 0x6e, 0xa2, 0x38, 0xfa, 0x0c, 0x8c, 0x6c, 0xb8, 0x41, 0x18, 0xad, 0xb9,
+ 0x3b, 0xf4, 0x68, 0xd8, 0x69, 0x3e, 0x80, 0xb5, 0x41, 0xf5, 0xc3, 0xa2, 0xc1, 0x09, 0x27, 0x38,
+ 0xa3, 0x4d, 0x18, 0x6e, 0x38, 0x7a, 0x55, 0xfd, 0x87, 0xae, 0x4a, 0x9d, 0x0e, 0xcb, 0x3a, 0x23,
+ 0x6c, 0xf2, 0xa5, 0xcb, 0xa9, 0xc6, 0x14, 0xe6, 0x03, 0x4c, 0x9d, 0xa1, 0x96, 0x13, 0xd7, 0x94,
+ 0x73, 0x1c, 0x15, 0xd0, 0x98, 0x23, 0x7b, 0xc9, 0x14, 0xd0, 0x34, 0x77, 0xf5, 0x4f, 0x43, 0x89,
+ 0xd0, 0x2e, 0xa4, 0x8c, 0xc5, 0x01, 0x73, 0xb9, 0xbb, 0xb6, 0xae, 0xb8, 0xb5, 0xc0, 0x37, 0xed,
+ 0x3c, 0x0b, 0x92, 0x13, 0x8e, 0x99, 0xa2, 0x39, 0xe8, 0x0b, 0x49, 0xe0, 0x2a, 0x5d, 0x72, 0x9b,
+ 0x61, 0x64, 0x64, 0xfc, 0xd5, 0x1a, 0xff, 0x8d, 0x45, 0x51, 0x3a, 0xbd, 0x1c, 0xa6, 0x8a, 0x65,
+ 0x87, 0x81, 0x36, 0xbd, 0x66, 0x18, 0x14, 0x0b, 0x2c, 0x7a, 0x1d, 0xfa, 0x03, 0xd2, 0x60, 0x86,
+ 0xc4, 0xe1, 0xee, 0x27, 0x39, 0xb7, 0x4b, 0xf2, 0x72, 0x58, 0x32, 0x40, 0xd7, 0x01, 0x05, 0x84,
+ 0x0a, 0x78, 0xae, 0xb7, 0xa9, 0xdc, 0xbb, 0xc5, 0x46, 0xab, 0x04, 0x69, 0x1c, 0x53, 0xc8, 0x07,
+ 0x8b, 0x38, 0xa3, 0x18, 0xba, 0x0a, 0xe3, 0x0a, 0xba, 0xe4, 0x85, 0x91, 0x43, 0x37, 0xb8, 0x51,
+ 0xc6, 0x4b, 0xe9, 0x57, 0x70, 0x92, 0x00, 0xa7, 0xcb, 0xd8, 0x3f, 0x67, 0x01, 0xef, 0xe7, 0x63,
+ 0xd0, 0x2a, 0xbc, 0x66, 0x6a, 0x15, 0xce, 0xe4, 0x8e, 0x5c, 0x8e, 0x46, 0xe1, 0xe7, 0x2c, 0x18,
+ 0xd4, 0x46, 0x36, 0x9e, 0xb3, 0x56, 0x9b, 0x39, 0xdb, 0x82, 0x31, 0x3a, 0xd3, 0x6f, 0xac, 0x87,
+ 0x24, 0xd8, 0x25, 0x75, 0x36, 0x31, 0x0b, 0x0f, 0x36, 0x31, 0x95, 0x2b, 0xe9, 0x72, 0x82, 0x21,
+ 0x4e, 0x55, 0x61, 0x7f, 0x5a, 0x36, 0x55, 0x79, 0xde, 0xd6, 0xd4, 0x98, 0x27, 0x3c, 0x6f, 0xd5,
+ 0xa8, 0xe2, 0x98, 0x86, 0x2e, 0xb5, 0x2d, 0x3f, 0x8c, 0x92, 0x9e, 0xb7, 0xd7, 0xfc, 0x30, 0xc2,
+ 0x0c, 0x63, 0xbf, 0x00, 0xb0, 0x70, 0x97, 0xd4, 0xf8, 0x8c, 0xd5, 0x2f, 0x3d, 0x56, 0xfe, 0xa5,
+ 0xc7, 0xfe, 0x3d, 0x0b, 0x46, 0x16, 0xe7, 0x8c, 0x93, 0x6b, 0x1a, 0x80, 0xdf, 0xd4, 0x6e, 0xdf,
+ 0x5e, 0x95, 0xee, 0x1f, 0xdc, 0x02, 0xae, 0xa0, 0x58, 0xa3, 0x40, 0x67, 0xa0, 0xd8, 0x68, 0x79,
+ 0x42, 0xed, 0xd9, 0x4f, 0x8f, 0xc7, 0xe5, 0x96, 0x87, 0x29, 0x4c, 0x7b, 0xac, 0x54, 0xec, 0xfa,
+ 0xb1, 0x52, 0xc7, 0x20, 0x25, 0xa8, 0x0c, 0xbd, 0x77, 0xee, 0xb8, 0x75, 0xfe, 0x14, 0x5c, 0xb8,
+ 0xa6, 0xdc, 0xbe, 0xbd, 0x34, 0x1f, 0x62, 0x0e, 0xb7, 0xbf, 0x58, 0x84, 0xa9, 0xc5, 0x06, 0xb9,
+ 0xfb, 0x0e, 0x9f, 0xc3, 0x77, 0xfb, 0xd4, 0xea, 0x70, 0x0a, 0xa4, 0xc3, 0x3e, 0xa7, 0xeb, 0xdc,
+ 0x1f, 0x1b, 0xd0, 0xcf, 0x1d, 0x4f, 0xe5, 0xe3, 0xf8, 0x4c, 0x73, 0x5f, 0x7e, 0x87, 0x4c, 0x73,
+ 0x07, 0x56, 0x61, 0xee, 0x53, 0x07, 0xa6, 0x80, 0x62, 0xc9, 0x7c, 0xea, 0x15, 0x18, 0xd2, 0x29,
+ 0x0f, 0xf5, 0xb0, 0xf5, 0xbb, 0x8b, 0x30, 0x46, 0x5b, 0xf0, 0x50, 0x07, 0xe2, 0x66, 0x7a, 0x20,
+ 0x8e, 0xfa, 0x71, 0x63, 0xe7, 0xd1, 0x78, 0x2b, 0x39, 0x1a, 0x57, 0xf2, 0x46, 0xe3, 0xb8, 0xc7,
+ 0xe0, 0x7b, 0x2c, 0x98, 0x58, 0x6c, 0xf8, 0xb5, 0xed, 0xc4, 0x03, 0xc4, 0x97, 0x60, 0x90, 0x6e,
+ 0xc7, 0xa1, 0x11, 0x8b, 0xc3, 0x88, 0xce, 0x22, 0x50, 0x58, 0xa7, 0xd3, 0x8a, 0xdd, 0xbc, 0xb9,
+ 0x34, 0x9f, 0x15, 0xd4, 0x45, 0xa0, 0xb0, 0x4e, 0x67, 0xff, 0x8e, 0x05, 0xe7, 0xae, 0xce, 0x2d,
+ 0xc4, 0x53, 0x31, 0x15, 0x57, 0xe6, 0x22, 0xf4, 0x35, 0xeb, 0x5a, 0x53, 0x62, 0xb5, 0xf0, 0x3c,
+ 0x6b, 0x85, 0xc0, 0xbe, 0x5b, 0x62, 0x26, 0xdd, 0x04, 0xb8, 0x8a, 0x2b, 0x73, 0x62, 0xdf, 0x95,
+ 0x56, 0x20, 0x2b, 0xd7, 0x0a, 0xf4, 0x04, 0xf4, 0xd3, 0x73, 0xc1, 0xad, 0xc9, 0x76, 0x73, 0x83,
+ 0x3e, 0x07, 0x61, 0x89, 0xb3, 0x7f, 0xd6, 0x82, 0x89, 0xab, 0x6e, 0x44, 0x0f, 0xed, 0x64, 0xe0,
+ 0x14, 0x7a, 0x6a, 0x87, 0x6e, 0xe4, 0x07, 0x7b, 0xc9, 0xc0, 0x29, 0x58, 0x61, 0xb0, 0x46, 0xc5,
+ 0x3f, 0x68, 0xd7, 0x65, 0x2f, 0x29, 0x0a, 0xa6, 0xdd, 0x0d, 0x0b, 0x38, 0x56, 0x14, 0xb4, 0xbf,
+ 0xea, 0x6e, 0xc0, 0x54, 0x96, 0x7b, 0x62, 0xe3, 0x56, 0xfd, 0x35, 0x2f, 0x11, 0x38, 0xa6, 0xb1,
+ 0xff, 0xdc, 0x82, 0xf2, 0xd5, 0x46, 0x2b, 0x8c, 0x48, 0xb0, 0x11, 0xe6, 0x6c, 0xba, 0x2f, 0x40,
+ 0x89, 0x48, 0x03, 0x81, 0x7c, 0xf2, 0x29, 0x05, 0x51, 0x65, 0x39, 0xe0, 0xf1, 0x5b, 0x14, 0x5d,
+ 0x17, 0xaf, 0xa4, 0x0f, 0xf7, 0xcc, 0x75, 0x11, 0x10, 0xd1, 0xeb, 0xd2, 0x03, 0xda, 0xb0, 0xc8,
+ 0x18, 0x0b, 0x29, 0x2c, 0xce, 0x28, 0x61, 0xff, 0x98, 0x05, 0x27, 0xd5, 0x07, 0xbf, 0xeb, 0x3e,
+ 0xd3, 0xfe, 0x6a, 0x01, 0x86, 0xaf, 0xad, 0xad, 0x55, 0xae, 0x92, 0x48, 0x9b, 0x95, 0xed, 0xcd,
+ 0xfe, 0x58, 0xb3, 0x5e, 0xb6, 0xbb, 0x23, 0xb6, 0x22, 0xb7, 0x31, 0xcd, 0xe3, 0xa2, 0x4d, 0x2f,
+ 0x79, 0xd1, 0x8d, 0xa0, 0x1a, 0x05, 0xae, 0xb7, 0x99, 0x39, 0xd3, 0xa5, 0xcc, 0x52, 0xcc, 0x93,
+ 0x59, 0xd0, 0x0b, 0xd0, 0xc7, 0x02, 0xb3, 0xc9, 0x41, 0x78, 0x44, 0x5d, 0xb1, 0x18, 0xf4, 0x60,
+ 0xbf, 0x5c, 0xba, 0x89, 0x97, 0xf8, 0x1f, 0x2c, 0x48, 0xd1, 0x4d, 0x18, 0xdc, 0x8a, 0xa2, 0xe6,
+ 0x35, 0xe2, 0xd4, 0x49, 0x20, 0x77, 0xd9, 0xf3, 0x59, 0xbb, 0x2c, 0xed, 0x04, 0x4e, 0x16, 0x6f,
+ 0x4c, 0x31, 0x2c, 0xc4, 0x3a, 0x1f, 0xbb, 0x0a, 0x10, 0xe3, 0x8e, 0xc8, 0x70, 0x63, 0xaf, 0x41,
+ 0x89, 0x7e, 0xee, 0x4c, 0xc3, 0x75, 0xda, 0x9b, 0xc6, 0x9f, 0x81, 0x92, 0x34, 0x7c, 0x87, 0x22,
+ 0x8a, 0x03, 0x3b, 0x91, 0xa4, 0x5d, 0x3c, 0xc4, 0x31, 0xde, 0x7e, 0x1c, 0x84, 0x6f, 0x69, 0x3b,
+ 0x96, 0xf6, 0x06, 0x9c, 0x60, 0x4e, 0xb2, 0x4e, 0xb4, 0x65, 0xcc, 0xd1, 0xce, 0x93, 0xe1, 0x59,
+ 0x71, 0xaf, 0xe3, 0x5f, 0x36, 0xa9, 0x3d, 0x4e, 0x1e, 0x92, 0x1c, 0xe3, 0x3b, 0x9e, 0xfd, 0x67,
+ 0x3d, 0xf0, 0xc8, 0x52, 0x35, 0x3f, 0xfc, 0xd0, 0xcb, 0x30, 0xc4, 0xc5, 0x45, 0x3a, 0x35, 0x9c,
+ 0x86, 0xa8, 0x57, 0x69, 0x40, 0xd7, 0x34, 0x1c, 0x36, 0x28, 0xd1, 0x39, 0x28, 0xba, 0x6f, 0x7b,
+ 0xc9, 0xa7, 0x7b, 0x4b, 0x6f, 0xac, 0x62, 0x0a, 0xa7, 0x68, 0x2a, 0x79, 0xf2, 0x2d, 0x5d, 0xa1,
+ 0x95, 0xf4, 0xf9, 0x1a, 0x8c, 0xb8, 0x61, 0x2d, 0x74, 0x97, 0x3c, 0xba, 0x4e, 0xb5, 0x95, 0xae,
+ 0x74, 0x0e, 0xb4, 0xd1, 0x0a, 0x8b, 0x13, 0xd4, 0xda, 0xf9, 0xd2, 0xdb, 0xb5, 0xf4, 0xda, 0x31,
+ 0xf8, 0x01, 0xdd, 0xfe, 0x9b, 0xec, 0xeb, 0x42, 0xa6, 0x82, 0x17, 0xdb, 0x3f, 0xff, 0xe0, 0x10,
+ 0x4b, 0x1c, 0xbd, 0xd0, 0xd5, 0xb6, 0x9c, 0xe6, 0x4c, 0x2b, 0xda, 0x9a, 0x77, 0xc3, 0x9a, 0xbf,
+ 0x4b, 0x82, 0x3d, 0x76, 0x17, 0x1f, 0x88, 0x2f, 0x74, 0x0a, 0x31, 0x77, 0x6d, 0xa6, 0x42, 0x29,
+ 0x71, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x09, 0xac, 0x92, 0x90, 0x1d, 0x01, 0x83, 0x8c, 0x8d, 0x7a,
+ 0x4c, 0x27, 0xc0, 0x8a, 0x49, 0x92, 0xde, 0x14, 0x70, 0xe1, 0x28, 0x04, 0xdc, 0x0f, 0xc2, 0xb0,
+ 0xeb, 0xb9, 0x91, 0xeb, 0x44, 0x3e, 0xb7, 0x1f, 0xf1, 0x6b, 0x37, 0x53, 0x30, 0x2f, 0xe9, 0x08,
+ 0x6c, 0xd2, 0xd9, 0xff, 0xb6, 0x07, 0xc6, 0xd9, 0xb0, 0xbd, 0x37, 0xc3, 0xbe, 0x9d, 0x66, 0xd8,
+ 0xcd, 0xf4, 0x0c, 0x3b, 0x0a, 0xc9, 0xfd, 0x81, 0xa7, 0xd9, 0x67, 0xa0, 0xa4, 0xde, 0x0f, 0xca,
+ 0x07, 0xc4, 0x56, 0xce, 0x03, 0xe2, 0xce, 0xa7, 0xb7, 0x74, 0x49, 0x2b, 0x66, 0xba, 0xa4, 0x7d,
+ 0xd9, 0x82, 0xd8, 0xb0, 0x80, 0xde, 0x80, 0x52, 0xd3, 0x67, 0x1e, 0xae, 0x81, 0x74, 0x1b, 0x7f,
+ 0xbc, 0xad, 0x65, 0x82, 0x47, 0x60, 0x0b, 0x78, 0x2f, 0x54, 0x64, 0x51, 0x1c, 0x73, 0x41, 0xd7,
+ 0xa1, 0xbf, 0x19, 0x90, 0x6a, 0xc4, 0xc2, 0x03, 0x75, 0xcf, 0x90, 0xcf, 0x1a, 0x5e, 0x10, 0x4b,
+ 0x0e, 0xf6, 0xbf, 0xb7, 0x60, 0x2c, 0x49, 0x8a, 0x3e, 0x0c, 0x3d, 0xe4, 0x2e, 0xa9, 0x89, 0xf6,
+ 0x66, 0x1e, 0xc5, 0xb1, 0x6a, 0x82, 0x77, 0x00, 0xfd, 0x8f, 0x59, 0x29, 0x74, 0x0d, 0xfa, 0xe9,
+ 0x39, 0x7c, 0x55, 0x85, 0xc2, 0x7b, 0x34, 0xef, 0x2c, 0x57, 0x02, 0x0d, 0x6f, 0x9c, 0x00, 0x61,
+ 0x59, 0x9c, 0xf9, 0x81, 0xd5, 0x9a, 0x55, 0x7a, 0xc5, 0x89, 0xda, 0xdd, 0xc4, 0xd7, 0xe6, 0x2a,
+ 0x9c, 0x48, 0x70, 0xe3, 0x7e, 0x60, 0x12, 0x88, 0x63, 0x26, 0xf6, 0x2f, 0x58, 0x00, 0xdc, 0xed,
+ 0xcd, 0xf1, 0x36, 0xc9, 0x31, 0x68, 0xd3, 0xe7, 0xa1, 0x27, 0x6c, 0x92, 0x5a, 0x3b, 0xe7, 0xeb,
+ 0xb8, 0x3d, 0xd5, 0x26, 0xa9, 0xc5, 0x33, 0x8e, 0xfe, 0xc3, 0xac, 0xb4, 0xfd, 0xbd, 0x00, 0x23,
+ 0x31, 0xd9, 0x52, 0x44, 0x76, 0xd0, 0x73, 0x46, 0xd0, 0x91, 0x33, 0x89, 0xa0, 0x23, 0x25, 0x46,
+ 0xad, 0x29, 0x6e, 0x3f, 0x03, 0xc5, 0x1d, 0xe7, 0xae, 0xd0, 0xcc, 0x3d, 0xd3, 0xbe, 0x19, 0x94,
+ 0xff, 0xf4, 0x8a, 0x73, 0x97, 0x5f, 0x5e, 0x9f, 0x91, 0x2b, 0x64, 0xc5, 0xb9, 0xdb, 0xd1, 0x41,
+ 0x98, 0x56, 0xc2, 0xea, 0x72, 0x3d, 0xe1, 0xd1, 0xd5, 0x55, 0x5d, 0xae, 0x97, 0xac, 0xcb, 0xf5,
+ 0xba, 0xa8, 0xcb, 0xf5, 0xd0, 0x3d, 0xe8, 0x17, 0x0e, 0x97, 0x22, 0x2c, 0xd9, 0xe5, 0x2e, 0xea,
+ 0x13, 0xfe, 0x9a, 0xbc, 0xce, 0xcb, 0xf2, 0x72, 0x2e, 0xa0, 0x1d, 0xeb, 0x95, 0x15, 0xa2, 0xff,
+ 0xc7, 0x82, 0x11, 0xf1, 0x1b, 0x93, 0xb7, 0x5b, 0x24, 0x8c, 0x84, 0xf0, 0xfa, 0x81, 0xee, 0xdb,
+ 0x20, 0x0a, 0xf2, 0xa6, 0x7c, 0x40, 0x9e, 0x33, 0x26, 0xb2, 0x63, 0x8b, 0x12, 0xad, 0x40, 0x7f,
+ 0xdb, 0x82, 0x13, 0x3b, 0xce, 0x5d, 0x5e, 0x23, 0x87, 0x61, 0x27, 0x72, 0x7d, 0xe1, 0xb8, 0xf0,
+ 0xe1, 0xee, 0x86, 0x3f, 0x55, 0x9c, 0x37, 0x52, 0x5a, 0x29, 0x4f, 0x64, 0x91, 0x74, 0x6c, 0x6a,
+ 0x66, 0xbb, 0xa6, 0x36, 0x60, 0x40, 0xce, 0xb7, 0x87, 0xe9, 0xdd, 0xcd, 0xea, 0x11, 0x73, 0xed,
+ 0xa1, 0xd6, 0xf3, 0x19, 0x18, 0xd2, 0xe7, 0xd8, 0x43, 0xad, 0xeb, 0x6d, 0x98, 0xc8, 0x98, 0x4b,
+ 0x0f, 0xb5, 0xca, 0x3b, 0x70, 0x26, 0x77, 0x7e, 0x3c, 0x54, 0xef, 0xfc, 0xaf, 0x5a, 0xfa, 0x3e,
+ 0x78, 0x0c, 0x26, 0x8d, 0x39, 0xd3, 0xa4, 0x71, 0xbe, 0xfd, 0xca, 0xc9, 0xb1, 0x6b, 0xbc, 0xa5,
+ 0x37, 0x9a, 0xee, 0xea, 0xe8, 0x75, 0xe8, 0x6b, 0x50, 0x88, 0x74, 0xdb, 0xb5, 0x3b, 0xaf, 0xc8,
+ 0x58, 0x98, 0x64, 0xf0, 0x10, 0x0b, 0x0e, 0xf6, 0x2f, 0x5b, 0xd0, 0x73, 0x0c, 0x3d, 0x81, 0xcd,
+ 0x9e, 0x78, 0x2e, 0x97, 0xb5, 0x88, 0xd0, 0x3e, 0x8d, 0x9d, 0x3b, 0x0b, 0x77, 0x23, 0xe2, 0x85,
+ 0xec, 0x44, 0xce, 0xec, 0x98, 0x9f, 0xb2, 0x60, 0x62, 0xd9, 0x77, 0xea, 0xb3, 0x4e, 0xc3, 0xf1,
+ 0x6a, 0x24, 0x58, 0xf2, 0x36, 0x0f, 0xe5, 0x73, 0x5e, 0xe8, 0xe8, 0x73, 0x3e, 0x27, 0x5d, 0xb6,
+ 0x7a, 0xf2, 0xc7, 0x8f, 0x4a, 0xd2, 0xc9, 0x30, 0x4c, 0x86, 0x73, 0xf1, 0x16, 0x20, 0xbd, 0x95,
+ 0xe2, 0xe5, 0x15, 0x86, 0x7e, 0x97, 0xb7, 0x57, 0x0c, 0xe2, 0x93, 0xd9, 0x12, 0x6e, 0xea, 0xf3,
+ 0xb4, 0x37, 0x45, 0x1c, 0x80, 0x25, 0x23, 0xfb, 0x65, 0xc8, 0x0c, 0x9b, 0xd1, 0x59, 0x7b, 0x61,
+ 0x7f, 0x1c, 0xc6, 0x59, 0xc9, 0x43, 0x6a, 0x06, 0xec, 0x84, 0xce, 0x35, 0x23, 0x04, 0xa8, 0xfd,
+ 0x05, 0x0b, 0x46, 0x57, 0x13, 0x91, 0x11, 0x2f, 0x32, 0x2b, 0x6d, 0x86, 0xaa, 0xbf, 0xca, 0xa0,
+ 0x58, 0x60, 0x8f, 0x5c, 0x15, 0xf6, 0xd7, 0x16, 0xc4, 0x91, 0x6c, 0x8e, 0x41, 0x7c, 0x9b, 0x33,
+ 0xc4, 0xb7, 0x4c, 0x41, 0x56, 0x35, 0x27, 0x4f, 0x7a, 0x43, 0xd7, 0x55, 0x8c, 0xb7, 0x36, 0x32,
+ 0x6c, 0xcc, 0x86, 0x4f, 0xc5, 0x11, 0x33, 0x10, 0x9c, 0x8c, 0xfa, 0x66, 0xff, 0x7e, 0x01, 0x90,
+ 0xa2, 0xed, 0x3a, 0x06, 0x5d, 0xba, 0xc4, 0xd1, 0xc4, 0xa0, 0xdb, 0x05, 0xc4, 0xfc, 0x0c, 0x02,
+ 0xc7, 0x0b, 0x39, 0x5b, 0x57, 0x28, 0xff, 0x0e, 0xe7, 0xc4, 0x30, 0x25, 0x1f, 0xa5, 0x2d, 0xa7,
+ 0xb8, 0xe1, 0x8c, 0x1a, 0x34, 0xff, 0x91, 0xde, 0x6e, 0xfd, 0x47, 0xfa, 0x3a, 0xbc, 0xae, 0xfc,
+ 0x8a, 0x05, 0xc3, 0xaa, 0x9b, 0xde, 0x25, 0x3e, 0xf8, 0xaa, 0x3d, 0x39, 0x1b, 0x68, 0x45, 0x6b,
+ 0x32, 0x3b, 0x58, 0xbe, 0x83, 0xbd, 0x92, 0x75, 0x1a, 0xee, 0x3d, 0xa2, 0x62, 0x96, 0x96, 0xc5,
+ 0xab, 0x57, 0x01, 0x3d, 0xd8, 0x2f, 0x0f, 0xab, 0x7f, 0x3c, 0x26, 0x7b, 0x5c, 0x84, 0x6e, 0xc9,
+ 0xa3, 0x89, 0xa9, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13, 0x92, 0xc4, 0x5b, 0xa5, 0xde, 0x0a,
+ 0x05, 0x1e, 0xec, 0x97, 0x47, 0x54, 0x01, 0x06, 0xc1, 0x9c, 0xba, 0xfb, 0xc8, 0x7e, 0xe9, 0xc9,
+ 0xd9, 0x31, 0xb2, 0xdf, 0x5f, 0x5a, 0xd0, 0xb3, 0xea, 0xd7, 0x8f, 0x63, 0x0b, 0x78, 0xcd, 0xd8,
+ 0x02, 0xce, 0xe6, 0xa5, 0xcb, 0xc8, 0x5d, 0xfd, 0x8b, 0x89, 0xd5, 0x7f, 0x3e, 0x97, 0x43, 0xfb,
+ 0x85, 0xbf, 0x03, 0x83, 0x2c, 0x09, 0x87, 0x78, 0x97, 0xf5, 0x82, 0xb1, 0xe0, 0xcb, 0x89, 0x05,
+ 0x3f, 0xaa, 0x91, 0x6a, 0x2b, 0xfd, 0x29, 0xe8, 0x17, 0x0f, 0x7d, 0x92, 0x8f, 0x8d, 0x05, 0x2d,
+ 0x96, 0x78, 0xfb, 0xc7, 0x8b, 0x60, 0x24, 0xfd, 0x40, 0xbf, 0x6a, 0xc1, 0x74, 0xc0, 0x1d, 0x80,
+ 0xeb, 0xf3, 0xad, 0xc0, 0xf5, 0x36, 0xab, 0xb5, 0x2d, 0x52, 0x6f, 0x35, 0x5c, 0x6f, 0x73, 0x69,
+ 0xd3, 0xf3, 0x15, 0x78, 0xe1, 0x2e, 0xa9, 0xb5, 0x98, 0x71, 0xae, 0x43, 0x86, 0x11, 0xe5, 0x48,
+ 0xff, 0xfc, 0xfd, 0xfd, 0xf2, 0x34, 0x3e, 0x14, 0x6f, 0x7c, 0xc8, 0xb6, 0xa0, 0xdf, 0xb1, 0xe0,
+ 0x32, 0xcf, 0x85, 0xd1, 0x7d, 0xfb, 0xdb, 0xdc, 0x96, 0x2b, 0x92, 0x55, 0xcc, 0x64, 0x8d, 0x04,
+ 0x3b, 0xb3, 0x1f, 0x14, 0x1d, 0x7a, 0xb9, 0x72, 0xb8, 0xba, 0xf0, 0x61, 0x1b, 0x67, 0xff, 0xc3,
+ 0x22, 0x0c, 0x8b, 0x08, 0x70, 0xe2, 0x0c, 0x78, 0xc9, 0x98, 0x12, 0x8f, 0x26, 0xa6, 0xc4, 0xb8,
+ 0x41, 0x7c, 0x34, 0xdb, 0x7f, 0x08, 0xe3, 0x74, 0x73, 0xbe, 0x46, 0x9c, 0x20, 0x5a, 0x27, 0x0e,
+ 0x77, 0x0b, 0x2b, 0x1e, 0x7a, 0xf7, 0x57, 0xfa, 0xc9, 0xe5, 0x24, 0x33, 0x9c, 0xe6, 0xff, 0xed,
+ 0x74, 0xe6, 0x78, 0x30, 0x96, 0x0a, 0xe2, 0xf7, 0x26, 0x94, 0xd4, 0x2b, 0x15, 0xb1, 0xe9, 0xb4,
+ 0x8f, 0x85, 0x99, 0xe4, 0xc0, 0xd5, 0x5f, 0xf1, 0x0b, 0xa9, 0x98, 0x9d, 0xfd, 0x77, 0x0b, 0x46,
+ 0x85, 0x7c, 0x10, 0x57, 0x61, 0xc0, 0x09, 0x43, 0x77, 0xd3, 0x23, 0xf5, 0x76, 0x1a, 0xca, 0x54,
+ 0x35, 0xec, 0xa5, 0xd0, 0x8c, 0x28, 0x89, 0x15, 0x0f, 0x74, 0x8d, 0x3b, 0xdf, 0xed, 0x92, 0x76,
+ 0xea, 0xc9, 0x14, 0x37, 0x90, 0xee, 0x79, 0xbb, 0x04, 0x8b, 0xf2, 0xe8, 0x93, 0xdc, 0x3b, 0xf2,
+ 0xba, 0xe7, 0xdf, 0xf1, 0xae, 0xfa, 0xbe, 0x8c, 0xf6, 0xd1, 0x1d, 0xc3, 0x71, 0xe9, 0x13, 0xa9,
+ 0x8a, 0x63, 0x93, 0x5b, 0x77, 0x51, 0x71, 0x3f, 0x0b, 0x2c, 0xf6, 0xbf, 0xf9, 0x28, 0x3c, 0x44,
+ 0x04, 0x46, 0x45, 0x78, 0x41, 0x09, 0x13, 0x7d, 0x97, 0x79, 0x95, 0x33, 0x4b, 0xc7, 0x8a, 0xf4,
+ 0xeb, 0x26, 0x0b, 0x9c, 0xe4, 0x69, 0xff, 0x8c, 0x05, 0xec, 0x81, 0xec, 0x31, 0xc8, 0x23, 0x1f,
+ 0x31, 0xe5, 0x91, 0xc9, 0xbc, 0x4e, 0xce, 0x11, 0x45, 0x5e, 0xe4, 0x33, 0xab, 0x12, 0xf8, 0x77,
+ 0xf7, 0x84, 0x4b, 0x4b, 0xe7, 0xfb, 0x87, 0xfd, 0xdf, 0x2d, 0xbe, 0x89, 0xc5, 0xe1, 0x04, 0x3e,
+ 0x07, 0x03, 0x35, 0xa7, 0xe9, 0xd4, 0x78, 0x86, 0xaa, 0x5c, 0x8d, 0x9e, 0x51, 0x68, 0x7a, 0x4e,
+ 0x94, 0xe0, 0x1a, 0x2a, 0x19, 0xa6, 0x72, 0x40, 0x82, 0x3b, 0x6a, 0xa5, 0x54, 0x95, 0x53, 0xdb,
+ 0x30, 0x6c, 0x30, 0x7b, 0xa8, 0xea, 0x8c, 0xcf, 0xf1, 0x23, 0x56, 0x85, 0x55, 0xdd, 0x81, 0x71,
+ 0x4f, 0xfb, 0x4f, 0x0f, 0x14, 0x79, 0xb9, 0x7c, 0xbc, 0xd3, 0x21, 0xca, 0x4e, 0x1f, 0xed, 0xed,
+ 0x6d, 0x82, 0x0d, 0x4e, 0x73, 0xb6, 0x7f, 0xc2, 0x82, 0xd3, 0x3a, 0xa1, 0xf6, 0xbc, 0xa7, 0x93,
+ 0x91, 0x64, 0x1e, 0x06, 0xfc, 0x26, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xa9, 0x71, 0x49, 0x76, 0xfa,
+ 0x0d, 0x01, 0x3f, 0x10, 0xf9, 0x16, 0x24, 0x77, 0x09, 0xc7, 0xaa, 0x24, 0xbd, 0x7d, 0xb2, 0xce,
+ 0x08, 0xc5, 0x43, 0x2e, 0xb6, 0x07, 0x30, 0x7b, 0x7b, 0x88, 0x05, 0xc6, 0xfe, 0x33, 0x8b, 0x4f,
+ 0x2c, 0xbd, 0xe9, 0xe8, 0x6d, 0x18, 0xdb, 0x71, 0xa2, 0xda, 0xd6, 0xc2, 0xdd, 0x66, 0xc0, 0x4d,
+ 0x4e, 0xb2, 0x9f, 0x9e, 0xe9, 0xd4, 0x4f, 0xda, 0x47, 0xc6, 0x0e, 0x9f, 0x2b, 0x09, 0x66, 0x38,
+ 0xc5, 0x1e, 0xad, 0xc3, 0x20, 0x83, 0xb1, 0x37, 0x8a, 0x61, 0x3b, 0xd1, 0x20, 0xaf, 0x36, 0xe5,
+ 0xb2, 0xb0, 0x12, 0xf3, 0xc1, 0x3a, 0x53, 0xfb, 0xcb, 0x45, 0xbe, 0xda, 0x99, 0x28, 0xff, 0x14,
+ 0xf4, 0x37, 0xfd, 0xfa, 0xdc, 0xd2, 0x3c, 0x16, 0xa3, 0xa0, 0x8e, 0x91, 0x0a, 0x07, 0x63, 0x89,
+ 0x47, 0x97, 0x60, 0x40, 0xfc, 0x94, 0x26, 0x42, 0xb6, 0x37, 0x0b, 0xba, 0x10, 0x2b, 0x2c, 0x7a,
+ 0x1e, 0xa0, 0x19, 0xf8, 0xbb, 0x6e, 0x9d, 0xc5, 0x2c, 0x29, 0x9a, 0xde, 0x46, 0x15, 0x85, 0xc1,
+ 0x1a, 0x15, 0x7a, 0x15, 0x86, 0x5b, 0x5e, 0xc8, 0xc5, 0x11, 0x2d, 0x32, 0xb4, 0xf2, 0x83, 0xb9,
+ 0xa9, 0x23, 0xb1, 0x49, 0x8b, 0x66, 0xa0, 0x2f, 0x72, 0x98, 0xf7, 0x4c, 0x6f, 0xbe, 0x53, 0xf0,
+ 0x1a, 0xa5, 0xd0, 0x93, 0x21, 0xd1, 0x02, 0x58, 0x14, 0x44, 0x6f, 0xca, 0xe7, 0xc2, 0x7c, 0x63,
+ 0x17, 0xde, 0xf8, 0xdd, 0x1d, 0x02, 0xda, 0x63, 0x61, 0xe1, 0xe5, 0x6f, 0xf0, 0x42, 0xaf, 0x00,
+ 0x90, 0xbb, 0x11, 0x09, 0x3c, 0xa7, 0xa1, 0x7c, 0xde, 0x94, 0x5c, 0x30, 0xef, 0xaf, 0xfa, 0xd1,
+ 0xcd, 0x90, 0x2c, 0x28, 0x0a, 0xac, 0x51, 0xdb, 0xbf, 0x53, 0x02, 0x88, 0xe5, 0x76, 0x74, 0x2f,
+ 0xb5, 0x71, 0x3d, 0xdb, 0x5e, 0xd2, 0x3f, 0xba, 0x5d, 0x0b, 0x7d, 0x9f, 0x05, 0x83, 0x22, 0x34,
+ 0x0b, 0x1b, 0xa1, 0x42, 0xfb, 0x8d, 0xd3, 0x8c, 0x10, 0x43, 0x4b, 0xf0, 0x26, 0xbc, 0x20, 0x67,
+ 0xa8, 0x86, 0xe9, 0xd8, 0x0a, 0xbd, 0x62, 0xf4, 0x7e, 0x79, 0x55, 0x2c, 0x1a, 0x5d, 0xa9, 0xae,
+ 0x8a, 0x25, 0x76, 0x46, 0xe8, 0xb7, 0xc4, 0x9b, 0xc6, 0x2d, 0xb1, 0x27, 0xff, 0x3d, 0xa4, 0x21,
+ 0xbe, 0x76, 0xba, 0x20, 0xa2, 0x8a, 0x1e, 0x1b, 0xa1, 0x37, 0xff, 0x11, 0x9f, 0x76, 0x4f, 0xea,
+ 0x10, 0x17, 0xe1, 0x33, 0x30, 0x5a, 0x37, 0x85, 0x00, 0x31, 0x13, 0x9f, 0xcc, 0xe3, 0x9b, 0x90,
+ 0x19, 0xe2, 0x63, 0x3f, 0x81, 0xc0, 0x49, 0xc6, 0xa8, 0xc2, 0x43, 0x65, 0x2c, 0x79, 0x1b, 0xbe,
+ 0x78, 0x11, 0x62, 0xe7, 0x8e, 0xe5, 0x5e, 0x18, 0x91, 0x1d, 0x4a, 0x19, 0x9f, 0xee, 0xab, 0xa2,
+ 0x2c, 0x56, 0x5c, 0xd0, 0xeb, 0xd0, 0xc7, 0x5e, 0x71, 0x85, 0x93, 0x03, 0xf9, 0x1a, 0x67, 0x33,
+ 0x66, 0x60, 0xbc, 0x20, 0xd9, 0xdf, 0x10, 0x0b, 0x0e, 0xe8, 0x9a, 0x7c, 0x23, 0x19, 0x2e, 0x79,
+ 0x37, 0x43, 0xc2, 0xde, 0x48, 0x96, 0x66, 0x1f, 0x8f, 0x9f, 0x3f, 0x72, 0x78, 0x66, 0xca, 0x44,
+ 0xa3, 0x24, 0x95, 0xa2, 0xc4, 0x7f, 0x99, 0x89, 0x51, 0x44, 0x38, 0xca, 0x6c, 0x9e, 0x99, 0xad,
+ 0x31, 0xee, 0xce, 0x5b, 0x26, 0x0b, 0x9c, 0xe4, 0x49, 0x25, 0x52, 0xbe, 0xea, 0xc5, 0x9b, 0x92,
+ 0x4e, 0x7b, 0x07, 0xbf, 0x88, 0xb3, 0xd3, 0x88, 0x43, 0xb0, 0x28, 0x7f, 0xac, 0xe2, 0xc1, 0x94,
+ 0x07, 0x63, 0xc9, 0x25, 0xfa, 0x50, 0xc5, 0x91, 0x3f, 0xe9, 0x81, 0x11, 0x73, 0x4a, 0xa1, 0xcb,
+ 0x50, 0x12, 0x4c, 0x54, 0x36, 0x13, 0xb5, 0x4a, 0x56, 0x24, 0x02, 0xc7, 0x34, 0x2c, 0x89, 0x0d,
+ 0x2b, 0xae, 0x39, 0x11, 0xc7, 0x49, 0x6c, 0x14, 0x06, 0x6b, 0x54, 0xf4, 0x62, 0xb5, 0xee, 0xfb,
+ 0x91, 0x3a, 0x90, 0xd4, 0xbc, 0x9b, 0x65, 0x50, 0x2c, 0xb0, 0xf4, 0x20, 0xda, 0x26, 0x81, 0x47,
+ 0x1a, 0x66, 0x14, 0x71, 0x75, 0x10, 0x5d, 0xd7, 0x91, 0xd8, 0xa4, 0xa5, 0xc7, 0xa9, 0x1f, 0xb2,
+ 0x89, 0x2c, 0xae, 0x6f, 0xb1, 0x53, 0x76, 0x95, 0x3f, 0x2f, 0x97, 0x78, 0xf4, 0x71, 0x38, 0xad,
+ 0x22, 0x76, 0x61, 0x6e, 0xcd, 0x90, 0x35, 0xf6, 0x19, 0xda, 0x96, 0xd3, 0x73, 0xd9, 0x64, 0x38,
+ 0xaf, 0x3c, 0x7a, 0x0d, 0x46, 0x84, 0x88, 0x2f, 0x39, 0xf6, 0x9b, 0x1e, 0x46, 0xd7, 0x0d, 0x2c,
+ 0x4e, 0x50, 0xcb, 0x38, 0xe8, 0x4c, 0xca, 0x96, 0x1c, 0x06, 0xd2, 0x71, 0xd0, 0x75, 0x3c, 0x4e,
+ 0x95, 0x40, 0x33, 0x30, 0xca, 0x65, 0x30, 0xd7, 0xdb, 0xe4, 0x63, 0x22, 0x9e, 0x7c, 0xa9, 0x25,
+ 0x75, 0xc3, 0x44, 0xe3, 0x24, 0x3d, 0x7a, 0x19, 0x86, 0x9c, 0xa0, 0xb6, 0xe5, 0x46, 0xa4, 0x16,
+ 0xb5, 0x02, 0xfe, 0x16, 0x4c, 0x73, 0xd1, 0x9a, 0xd1, 0x70, 0xd8, 0xa0, 0xb4, 0xef, 0xc1, 0x44,
+ 0x46, 0xdc, 0x09, 0x3a, 0x71, 0x9c, 0xa6, 0x2b, 0xbf, 0x29, 0xe1, 0x07, 0x3d, 0x53, 0x59, 0x92,
+ 0x5f, 0xa3, 0x51, 0xd1, 0xd9, 0xc9, 0xe2, 0x53, 0x68, 0x89, 0x57, 0xd5, 0xec, 0x5c, 0x94, 0x08,
+ 0x1c, 0xd3, 0xd8, 0xff, 0xa9, 0x00, 0xa3, 0x19, 0xb6, 0x15, 0x96, 0xfc, 0x33, 0x71, 0x49, 0x89,
+ 0x73, 0x7d, 0x9a, 0x61, 0xf5, 0x0b, 0x87, 0x08, 0xab, 0x5f, 0xec, 0x14, 0x56, 0xbf, 0xe7, 0x9d,
+ 0x84, 0xd5, 0x37, 0x7b, 0xac, 0xb7, 0xab, 0x1e, 0xcb, 0x08, 0xc5, 0xdf, 0x77, 0xc8, 0x50, 0xfc,
+ 0x46, 0xa7, 0xf7, 0x77, 0xd1, 0xe9, 0x3f, 0x52, 0x80, 0xb1, 0xa4, 0x2b, 0xe9, 0x31, 0xe8, 0x6d,
+ 0x5f, 0x37, 0xf4, 0xb6, 0x97, 0xba, 0x79, 0xa2, 0x9b, 0xab, 0xc3, 0xc5, 0x09, 0x1d, 0xee, 0xd3,
+ 0x5d, 0x71, 0x6b, 0xaf, 0xcf, 0xfd, 0xc9, 0x02, 0x9c, 0xcc, 0x7c, 0x23, 0x7c, 0x0c, 0x7d, 0x73,
+ 0xc3, 0xe8, 0x9b, 0xe7, 0xba, 0x7e, 0xbe, 0x9c, 0xdb, 0x41, 0xb7, 0x13, 0x1d, 0x74, 0xb9, 0x7b,
+ 0x96, 0xed, 0x7b, 0xe9, 0xeb, 0x45, 0x38, 0x9f, 0x59, 0x2e, 0x56, 0x7b, 0x2e, 0x1a, 0x6a, 0xcf,
+ 0xe7, 0x13, 0x6a, 0x4f, 0xbb, 0x7d, 0xe9, 0xa3, 0xd1, 0x83, 0x8a, 0x67, 0xbc, 0x2c, 0x18, 0xc1,
+ 0x03, 0xea, 0x40, 0x8d, 0x67, 0xbc, 0x8a, 0x11, 0x36, 0xf9, 0x7e, 0x3b, 0xe9, 0x3e, 0x7f, 0xdb,
+ 0x82, 0x33, 0x99, 0x63, 0x73, 0x0c, 0xba, 0xae, 0x55, 0x53, 0xd7, 0xf5, 0x54, 0xd7, 0xb3, 0x35,
+ 0x47, 0xf9, 0xf5, 0xd3, 0xbd, 0x39, 0xdf, 0xc2, 0x6e, 0xf2, 0x37, 0x60, 0xd0, 0xa9, 0xd5, 0x48,
+ 0x18, 0xae, 0xf8, 0x75, 0x15, 0x81, 0xfb, 0x39, 0x76, 0xcf, 0x8a, 0xc1, 0x07, 0xfb, 0xe5, 0xa9,
+ 0x24, 0x8b, 0x18, 0x8d, 0x75, 0x0e, 0xe8, 0x93, 0x30, 0x10, 0x8a, 0x73, 0x53, 0x8c, 0xfd, 0x0b,
+ 0x5d, 0x76, 0x8e, 0xb3, 0x4e, 0x1a, 0x66, 0xa8, 0x27, 0xa5, 0xa9, 0x50, 0x2c, 0xcd, 0xb0, 0x30,
+ 0x85, 0x23, 0x0d, 0x0b, 0xf3, 0x3c, 0xc0, 0xae, 0xba, 0x0c, 0x24, 0xf5, 0x0f, 0xda, 0x35, 0x41,
+ 0xa3, 0x42, 0x1f, 0x85, 0xb1, 0x90, 0xc7, 0x42, 0x9c, 0x6b, 0x38, 0x21, 0x7b, 0x6d, 0x23, 0x66,
+ 0x21, 0x0b, 0x27, 0x55, 0x4d, 0xe0, 0x70, 0x8a, 0x1a, 0x2d, 0xca, 0x5a, 0x59, 0xe0, 0x46, 0x3e,
+ 0x31, 0x2f, 0xc6, 0x35, 0x8a, 0xd4, 0xe3, 0x27, 0x92, 0xdd, 0xcf, 0x3a, 0x5e, 0x2b, 0x89, 0x3e,
+ 0x09, 0x40, 0xa7, 0x8f, 0xd0, 0x43, 0xf4, 0xe7, 0x6f, 0x9e, 0x74, 0x57, 0xa9, 0x67, 0x3a, 0x37,
+ 0xb3, 0x97, 0xb7, 0xf3, 0x8a, 0x09, 0xd6, 0x18, 0x22, 0x07, 0x86, 0xe3, 0x7f, 0x71, 0x66, 0xde,
+ 0x4b, 0xb9, 0x35, 0x24, 0x99, 0x33, 0x95, 0xf7, 0xbc, 0xce, 0x02, 0x9b, 0x1c, 0xed, 0x7f, 0x37,
+ 0x00, 0x8f, 0xb4, 0xd9, 0x86, 0xd1, 0x8c, 0x69, 0xea, 0x7d, 0x26, 0x79, 0x7f, 0x9f, 0xca, 0x2c,
+ 0x6c, 0x5c, 0xe8, 0x13, 0xb3, 0xbd, 0xf0, 0x8e, 0x67, 0xfb, 0x0f, 0x59, 0x9a, 0x66, 0x85, 0x3b,
+ 0x95, 0x7e, 0xe4, 0x90, 0xc7, 0xcb, 0x11, 0xaa, 0x5a, 0x36, 0x32, 0xf4, 0x15, 0xcf, 0x77, 0xdd,
+ 0x9c, 0xee, 0x15, 0x18, 0x5f, 0xcd, 0x0e, 0x00, 0xcc, 0x55, 0x19, 0x57, 0x0f, 0xfb, 0xfd, 0xc7,
+ 0x15, 0x0c, 0xf8, 0xf7, 0x2d, 0x38, 0x93, 0x02, 0xf3, 0x36, 0x90, 0x50, 0xc4, 0xa8, 0x5a, 0x7d,
+ 0xc7, 0x8d, 0x97, 0x0c, 0xf9, 0x37, 0x5c, 0x13, 0xdf, 0x70, 0x26, 0x97, 0x2e, 0xd9, 0xf4, 0x1f,
+ 0xfc, 0xa3, 0xf2, 0x04, 0xab, 0xc0, 0x24, 0xc4, 0xf9, 0x4d, 0x3f, 0xde, 0x8b, 0xff, 0x37, 0x27,
+ 0xf6, 0xf1, 0xd4, 0x32, 0x9c, 0x6f, 0xdf, 0xd5, 0x87, 0x7a, 0x9e, 0xfc, 0x7b, 0x16, 0x9c, 0x6b,
+ 0x1b, 0x03, 0xe7, 0x5b, 0x50, 0xce, 0xb5, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x25, 0x0c, 0xef, 0xb8,
+ 0xcb, 0x50, 0xaa, 0x25, 0xf2, 0xa1, 0xc6, 0xd1, 0x20, 0x54, 0x2e, 0xd4, 0x98, 0xc6, 0x70, 0x82,
+ 0x2b, 0x74, 0x74, 0x82, 0xfb, 0x0d, 0x0b, 0x52, 0x67, 0xd5, 0x31, 0x08, 0x4d, 0x4b, 0xa6, 0xd0,
+ 0xf4, 0x78, 0x37, 0xbd, 0x99, 0x23, 0x2f, 0xfd, 0xc5, 0x28, 0x9c, 0xca, 0x79, 0x5d, 0xb8, 0x0b,
+ 0xe3, 0x9b, 0x35, 0x62, 0x3e, 0x27, 0x6f, 0x17, 0x66, 0xa9, 0xed, 0xdb, 0x73, 0x9e, 0x86, 0x36,
+ 0x45, 0x82, 0xd3, 0x55, 0xa0, 0xcf, 0x5b, 0x70, 0xc2, 0xb9, 0x13, 0x2e, 0x50, 0xe1, 0xd7, 0xad,
+ 0xcd, 0x36, 0xfc, 0xda, 0x36, 0x95, 0x2c, 0xe4, 0xb2, 0x7a, 0x31, 0x53, 0x21, 0x79, 0xbb, 0x9a,
+ 0xa2, 0x37, 0xaa, 0x67, 0x49, 0xc7, 0xb3, 0xa8, 0x70, 0x66, 0x5d, 0x08, 0x8b, 0xfc, 0x28, 0xf4,
+ 0x6a, 0xdd, 0x26, 0xe0, 0x41, 0xd6, 0x33, 0x50, 0x2e, 0xcd, 0x49, 0x0c, 0x56, 0x7c, 0xd0, 0xa7,
+ 0xa1, 0xb4, 0x29, 0xdf, 0x36, 0x67, 0x48, 0x8b, 0x71, 0x47, 0xb6, 0x7f, 0xf1, 0xcd, 0xbd, 0x0a,
+ 0x14, 0x11, 0x8e, 0x99, 0xa2, 0xd7, 0xa0, 0xe8, 0x6d, 0x84, 0xed, 0xf2, 0x76, 0x27, 0xdc, 0x47,
+ 0x79, 0x58, 0x91, 0xd5, 0xc5, 0x2a, 0xa6, 0x05, 0xd1, 0x35, 0x28, 0x06, 0xeb, 0x75, 0xa1, 0x4d,
+ 0xcf, 0x5c, 0xa4, 0x78, 0x76, 0x3e, 0xa7, 0x55, 0x8c, 0x13, 0x9e, 0x9d, 0xc7, 0x94, 0x05, 0xaa,
+ 0x40, 0x2f, 0x7b, 0x92, 0x27, 0x64, 0xb3, 0xcc, 0x5b, 0x68, 0x9b, 0xa7, 0xad, 0x3c, 0xf6, 0x08,
+ 0x23, 0xc0, 0x9c, 0x11, 0x5a, 0x83, 0xbe, 0x1a, 0xcb, 0xf1, 0x2c, 0x84, 0xb1, 0xf7, 0x67, 0xea,
+ 0xcd, 0xdb, 0x24, 0xbf, 0x16, 0x6a, 0x64, 0x46, 0x81, 0x05, 0x2f, 0xc6, 0x95, 0x34, 0xb7, 0x36,
+ 0x42, 0xa6, 0x77, 0xcb, 0xe3, 0xda, 0x26, 0xa7, 0xbb, 0xe0, 0xca, 0x28, 0xb0, 0xe0, 0x85, 0x5e,
+ 0x81, 0xc2, 0x46, 0x4d, 0x3c, 0xb7, 0xcb, 0x54, 0xa0, 0x9b, 0x91, 0x61, 0x66, 0xfb, 0xee, 0xef,
+ 0x97, 0x0b, 0x8b, 0x73, 0xb8, 0xb0, 0x51, 0x43, 0xab, 0xd0, 0xbf, 0xc1, 0x63, 0x49, 0x08, 0x1d,
+ 0xf9, 0x93, 0xd9, 0x61, 0x2e, 0x52, 0xe1, 0x26, 0xf8, 0xd3, 0x2d, 0x81, 0xc0, 0x92, 0x09, 0x4b,
+ 0xd7, 0xa1, 0x62, 0x62, 0x88, 0x90, 0x7c, 0xd3, 0x87, 0x8b, 0x63, 0xc2, 0x65, 0xe5, 0x38, 0xb2,
+ 0x06, 0xd6, 0x38, 0xd2, 0x59, 0xed, 0xdc, 0x6b, 0x05, 0x2c, 0x5e, 0xbb, 0x88, 0xdd, 0x94, 0x39,
+ 0xab, 0x67, 0x24, 0x51, 0xbb, 0x59, 0xad, 0x88, 0x70, 0xcc, 0x14, 0x6d, 0xc3, 0xf0, 0x6e, 0xd8,
+ 0xdc, 0x22, 0x72, 0x49, 0xb3, 0x50, 0x4e, 0x39, 0xb2, 0xde, 0x2d, 0x41, 0xe8, 0x06, 0x51, 0xcb,
+ 0x69, 0xa4, 0x76, 0x21, 0x26, 0x97, 0xdf, 0xd2, 0x99, 0x61, 0x93, 0x37, 0xed, 0xfe, 0xb7, 0x5b,
+ 0xfe, 0xfa, 0x5e, 0x44, 0x44, 0x24, 0xbd, 0xcc, 0xee, 0x7f, 0x83, 0x93, 0xa4, 0xbb, 0x5f, 0x20,
+ 0xb0, 0x64, 0x82, 0x6e, 0x89, 0xee, 0x61, 0xbb, 0xe7, 0x58, 0x7e, 0x98, 0xde, 0x19, 0x49, 0x94,
+ 0xd3, 0x29, 0x6c, 0xb7, 0x8c, 0x59, 0xb1, 0x5d, 0xb2, 0xb9, 0xe5, 0x47, 0xbe, 0x97, 0xd8, 0xa1,
+ 0xc7, 0xf3, 0x77, 0xc9, 0x4a, 0x06, 0x7d, 0x7a, 0x97, 0xcc, 0xa2, 0xc2, 0x99, 0x75, 0xa1, 0x3a,
+ 0x8c, 0x34, 0xfd, 0x20, 0xba, 0xe3, 0x07, 0x72, 0x7e, 0xa1, 0x36, 0x3a, 0x3e, 0x83, 0x52, 0xd4,
+ 0xc8, 0x82, 0x54, 0x9a, 0x18, 0x9c, 0xe0, 0x89, 0x3e, 0x06, 0xfd, 0x61, 0xcd, 0x69, 0x90, 0xa5,
+ 0x1b, 0x93, 0x13, 0xf9, 0xc7, 0x4f, 0x95, 0x93, 0xe4, 0xcc, 0x2e, 0x1e, 0x0a, 0x84, 0x93, 0x60,
+ 0xc9, 0x0e, 0x2d, 0x42, 0x2f, 0x4b, 0x83, 0xc9, 0xc2, 0x3e, 0xe6, 0x44, 0x1b, 0x4e, 0x39, 0xf3,
+ 0xf3, 0xbd, 0x89, 0x81, 0x31, 0x2f, 0x4e, 0xd7, 0x80, 0xb8, 0xea, 0xfa, 0xe1, 0xe4, 0xc9, 0xfc,
+ 0x35, 0x20, 0x6e, 0xc8, 0x37, 0xaa, 0xed, 0xd6, 0x80, 0x22, 0xc2, 0x31, 0x53, 0xba, 0x33, 0xd3,
+ 0xdd, 0xf4, 0x54, 0x1b, 0x2f, 0xb4, 0xdc, 0xbd, 0x94, 0xed, 0xcc, 0x74, 0x27, 0xa5, 0x2c, 0xec,
+ 0x3f, 0xee, 0x4f, 0xcb, 0x2c, 0x4c, 0x39, 0xf2, 0xbf, 0x5b, 0x29, 0xbb, 0xf9, 0x07, 0xba, 0xd5,
+ 0xd5, 0x1e, 0xe1, 0xb5, 0xee, 0xf3, 0x16, 0x9c, 0x6a, 0x66, 0x7e, 0x88, 0x10, 0x00, 0xba, 0x53,
+ 0xf9, 0xf2, 0x4f, 0x57, 0x21, 0x42, 0xb3, 0xf1, 0x38, 0xa7, 0xa6, 0xe4, 0xd5, 0xb9, 0xf8, 0x8e,
+ 0xaf, 0xce, 0x2b, 0x30, 0x50, 0xe3, 0xf7, 0x1c, 0x19, 0xda, 0xba, 0xab, 0x00, 0x77, 0x4c, 0x94,
+ 0x10, 0x17, 0xa4, 0x0d, 0xac, 0x58, 0xa0, 0x1f, 0xb6, 0xe0, 0x5c, 0xb2, 0xe9, 0x98, 0x30, 0xb4,
+ 0x88, 0x2b, 0xca, 0xf5, 0x32, 0x8b, 0xe2, 0xfb, 0x53, 0xf2, 0xbf, 0x41, 0x7c, 0xd0, 0x89, 0x00,
+ 0xb7, 0xaf, 0x0c, 0xcd, 0x67, 0x28, 0x86, 0xfa, 0x4c, 0x63, 0x58, 0x17, 0xca, 0xa1, 0x17, 0x61,
+ 0x68, 0xc7, 0x6f, 0x79, 0x91, 0x70, 0x5a, 0x13, 0x0e, 0x34, 0xcc, 0x71, 0x64, 0x45, 0x83, 0x63,
+ 0x83, 0x2a, 0xa1, 0x52, 0x1a, 0x78, 0x60, 0x95, 0xd2, 0x5b, 0x30, 0xe4, 0x69, 0x5e, 0xd6, 0x42,
+ 0x1e, 0xb8, 0x98, 0x1f, 0x13, 0x58, 0xf7, 0xc9, 0xe6, 0xad, 0xd4, 0x21, 0xd8, 0xe0, 0x76, 0xbc,
+ 0xde, 0x6c, 0x3f, 0x5f, 0xc8, 0x10, 0xea, 0xb9, 0x5a, 0xe9, 0xc3, 0xa6, 0x5a, 0xe9, 0x62, 0x52,
+ 0xad, 0x94, 0x32, 0x84, 0x18, 0x1a, 0xa5, 0xee, 0x53, 0x64, 0x75, 0x1d, 0x57, 0xf4, 0xbb, 0x2d,
+ 0x38, 0xcd, 0x34, 0xeb, 0xb4, 0x82, 0x77, 0xac, 0x4d, 0x7f, 0xe4, 0xfe, 0x7e, 0xf9, 0xf4, 0x72,
+ 0x36, 0x3b, 0x9c, 0x57, 0x8f, 0xdd, 0x80, 0x0b, 0x9d, 0x8e, 0x46, 0xe6, 0x41, 0x59, 0x57, 0xa6,
+ 0xf7, 0xd8, 0x83, 0xb2, 0xbe, 0x34, 0x8f, 0x19, 0xa6, 0xdb, 0xa8, 0x59, 0xf6, 0x7f, 0xb0, 0xa0,
+ 0x58, 0xf1, 0xeb, 0xc7, 0x70, 0xe9, 0xfe, 0x88, 0x71, 0xe9, 0x7e, 0x24, 0xfb, 0x50, 0xae, 0xe7,
+ 0x9a, 0x92, 0x16, 0x12, 0xa6, 0xa4, 0x73, 0x79, 0x0c, 0xda, 0x1b, 0x8e, 0x7e, 0xaa, 0x08, 0x83,
+ 0x15, 0xbf, 0xae, 0x9e, 0x2f, 0xfc, 0xe3, 0x07, 0x79, 0xbe, 0x90, 0x9b, 0xf4, 0x44, 0xe3, 0xcc,
+ 0x1c, 0x2f, 0xe5, 0xcb, 0xed, 0x6f, 0xb1, 0x57, 0x0c, 0xb7, 0x89, 0xbb, 0xb9, 0x15, 0x91, 0x7a,
+ 0xf2, 0x73, 0x8e, 0xef, 0x15, 0xc3, 0x1f, 0x17, 0x60, 0x34, 0x51, 0x3b, 0x6a, 0xc0, 0x70, 0x43,
+ 0x37, 0x54, 0x88, 0x79, 0xfa, 0x40, 0x36, 0x0e, 0xe1, 0x05, 0xae, 0x81, 0xb0, 0xc9, 0x1c, 0x4d,
+ 0x03, 0x28, 0xcb, 0xbd, 0x54, 0x57, 0xb3, 0x9b, 0x87, 0x32, 0xed, 0x87, 0x58, 0xa3, 0x40, 0x2f,
+ 0xc1, 0x60, 0xe4, 0x37, 0xfd, 0x86, 0xbf, 0xb9, 0x77, 0x9d, 0xc8, 0x80, 0x6a, 0xca, 0xb7, 0x73,
+ 0x2d, 0x46, 0x61, 0x9d, 0x0e, 0xdd, 0x85, 0x71, 0xc5, 0xa4, 0x7a, 0x04, 0xc6, 0x1b, 0xa6, 0xd9,
+ 0x58, 0x4d, 0x72, 0xc4, 0xe9, 0x4a, 0xec, 0x9f, 0x2d, 0xf2, 0x2e, 0xf6, 0x22, 0xf7, 0xbd, 0xd5,
+ 0xf0, 0xee, 0x5e, 0x0d, 0x5f, 0xb7, 0x60, 0x8c, 0xd6, 0xce, 0x1c, 0xd7, 0xa4, 0xa8, 0xa1, 0x22,
+ 0xa1, 0x5b, 0x6d, 0x22, 0xa1, 0x5f, 0xa4, 0xbb, 0x66, 0xdd, 0x6f, 0x45, 0x42, 0x7f, 0xa8, 0x6d,
+ 0x8b, 0x14, 0x8a, 0x05, 0x56, 0xd0, 0x91, 0x20, 0x10, 0x8f, 0x6d, 0x75, 0x3a, 0x12, 0x04, 0x58,
+ 0x60, 0x65, 0xa0, 0xf4, 0x9e, 0xec, 0x40, 0xe9, 0x3c, 0xde, 0xad, 0x70, 0x71, 0x12, 0x42, 0x9f,
+ 0x16, 0xef, 0x56, 0xfa, 0x3e, 0xc5, 0x34, 0xf6, 0x57, 0x8b, 0x30, 0x54, 0xf1, 0xeb, 0xb1, 0xd5,
+ 0xfe, 0x45, 0xc3, 0x6a, 0x7f, 0x21, 0x61, 0xb5, 0x1f, 0xd3, 0x69, 0xdf, 0xb3, 0xd1, 0x7f, 0xb3,
+ 0x6c, 0xf4, 0xbf, 0x6e, 0xb1, 0x51, 0x9b, 0x5f, 0xad, 0x72, 0x3f, 0x48, 0x74, 0x05, 0x06, 0xd9,
+ 0x06, 0xc3, 0x5e, 0x77, 0x4b, 0x53, 0x36, 0x4b, 0x5c, 0xb6, 0x1a, 0x83, 0xb1, 0x4e, 0x83, 0x2e,
+ 0xc1, 0x40, 0x48, 0x9c, 0xa0, 0xb6, 0xa5, 0x76, 0x57, 0x61, 0x77, 0xe6, 0x30, 0xac, 0xb0, 0xe8,
+ 0x8d, 0x38, 0xd4, 0x6a, 0x31, 0xff, 0xb5, 0xa8, 0xde, 0x1e, 0xbe, 0x44, 0xf2, 0xe3, 0xab, 0xda,
+ 0xb7, 0x01, 0xa5, 0xe9, 0xbb, 0x08, 0x06, 0x58, 0x36, 0x83, 0x01, 0x96, 0x52, 0x81, 0x00, 0xff,
+ 0xca, 0x82, 0x91, 0x8a, 0x5f, 0xa7, 0x4b, 0xf7, 0xdb, 0x69, 0x9d, 0xea, 0x71, 0xa6, 0xfb, 0xda,
+ 0xc4, 0x99, 0x7e, 0x0c, 0x7a, 0x2b, 0x7e, 0xbd, 0x43, 0xc0, 0xc2, 0xbf, 0x61, 0x41, 0x7f, 0xc5,
+ 0xaf, 0x1f, 0x83, 0x69, 0xe2, 0xc3, 0xa6, 0x69, 0xe2, 0x74, 0xce, 0xbc, 0xc9, 0xb1, 0x46, 0xfc,
+ 0xff, 0x3d, 0x30, 0x4c, 0xdb, 0xe9, 0x6f, 0xca, 0xa1, 0x34, 0xba, 0xcd, 0xea, 0xa2, 0xdb, 0xa8,
+ 0x14, 0xee, 0x37, 0x1a, 0xfe, 0x9d, 0xe4, 0xb0, 0x2e, 0x32, 0x28, 0x16, 0x58, 0xf4, 0x2c, 0x0c,
+ 0x34, 0x03, 0xb2, 0xeb, 0xfa, 0x42, 0xbc, 0xd5, 0x0c, 0x3d, 0x15, 0x01, 0xc7, 0x8a, 0x82, 0x5e,
+ 0x4d, 0x43, 0xd7, 0xa3, 0x47, 0x79, 0xcd, 0xf7, 0xea, 0x5c, 0x7b, 0x5f, 0x14, 0xc9, 0x50, 0x34,
+ 0x38, 0x36, 0xa8, 0xd0, 0x6d, 0x28, 0xb1, 0xff, 0x6c, 0xdb, 0x39, 0x7c, 0x1a, 0x66, 0x91, 0x1e,
+ 0x52, 0x30, 0xc0, 0x31, 0x2f, 0xf4, 0x3c, 0x40, 0x24, 0x13, 0x0a, 0x84, 0x22, 0x70, 0x9d, 0xba,
+ 0x0a, 0xa8, 0x54, 0x03, 0x21, 0xd6, 0xa8, 0xd0, 0x33, 0x50, 0x8a, 0x1c, 0xb7, 0xb1, 0xec, 0x7a,
+ 0xcc, 0xfe, 0x4b, 0xdb, 0x2f, 0xb2, 0x34, 0x0a, 0x20, 0x8e, 0xf1, 0x54, 0x14, 0x63, 0x41, 0x4d,
+ 0x78, 0x12, 0xfa, 0x01, 0x46, 0xcd, 0x44, 0xb1, 0x65, 0x05, 0xc5, 0x1a, 0x05, 0xda, 0x82, 0xb3,
+ 0xae, 0xc7, 0x12, 0x87, 0x90, 0xea, 0xb6, 0xdb, 0x5c, 0x5b, 0xae, 0xde, 0x22, 0x81, 0xbb, 0xb1,
+ 0x37, 0xeb, 0xd4, 0xb6, 0x89, 0x27, 0x13, 0xec, 0xca, 0xbc, 0xeb, 0x67, 0x97, 0xda, 0xd0, 0xe2,
+ 0xb6, 0x9c, 0xec, 0x17, 0xd8, 0x7c, 0xbf, 0x51, 0x45, 0x4f, 0x1b, 0x5b, 0xc7, 0x29, 0x7d, 0xeb,
+ 0x38, 0xd8, 0x2f, 0xf7, 0xdd, 0xa8, 0x6a, 0x31, 0x39, 0x5e, 0x86, 0x93, 0x15, 0xbf, 0x5e, 0xf1,
+ 0x83, 0x68, 0xd1, 0x0f, 0xee, 0x38, 0x41, 0x5d, 0x4e, 0xaf, 0xb2, 0x8c, 0x4a, 0x42, 0xf7, 0xcf,
+ 0x5e, 0xbe, 0xbb, 0x18, 0x11, 0x47, 0x5e, 0x60, 0x12, 0xdb, 0x21, 0xdf, 0xd2, 0xd5, 0x98, 0xec,
+ 0xa0, 0x52, 0xef, 0x5c, 0x75, 0x22, 0x82, 0x6e, 0xb0, 0x14, 0xfa, 0xf1, 0x31, 0x2a, 0x8a, 0x3f,
+ 0xa5, 0xa5, 0xd0, 0x8f, 0x91, 0x99, 0xe7, 0xae, 0x59, 0xde, 0xfe, 0x9c, 0xa8, 0x84, 0xeb, 0x01,
+ 0xb8, 0xbf, 0x62, 0x37, 0x39, 0xa8, 0x65, 0x6e, 0x8e, 0x42, 0x7e, 0x52, 0x07, 0x6e, 0x79, 0x6d,
+ 0x9b, 0x9b, 0xc3, 0xfe, 0x4e, 0x38, 0x95, 0xac, 0xbe, 0xeb, 0x44, 0xd8, 0x73, 0x30, 0x1e, 0xe8,
+ 0x05, 0xb5, 0x44, 0x67, 0x27, 0x79, 0x3e, 0x85, 0x04, 0x12, 0xa7, 0xe9, 0xed, 0x97, 0x60, 0x9c,
+ 0xde, 0x3d, 0x95, 0x20, 0xc7, 0x7a, 0xb9, 0x73, 0x78, 0x96, 0xff, 0xd8, 0xcb, 0x0e, 0xa2, 0x44,
+ 0xd6, 0x1b, 0xf4, 0x29, 0x18, 0x09, 0xc9, 0xb2, 0xeb, 0xb5, 0xee, 0x4a, 0xed, 0x53, 0x9b, 0x47,
+ 0xa4, 0xd5, 0x05, 0x9d, 0x92, 0xeb, 0xb0, 0x4d, 0x18, 0x4e, 0x70, 0x43, 0x3b, 0x30, 0x72, 0xc7,
+ 0xf5, 0xea, 0xfe, 0x9d, 0x50, 0xf2, 0x1f, 0xc8, 0x57, 0x65, 0xdf, 0xe6, 0x94, 0x89, 0x36, 0x1a,
+ 0xd5, 0xdd, 0x36, 0x98, 0xe1, 0x04, 0x73, 0xba, 0xd8, 0x83, 0x96, 0x37, 0x13, 0xde, 0x0c, 0x09,
+ 0x7f, 0x16, 0x28, 0x16, 0x3b, 0x96, 0x40, 0x1c, 0xe3, 0xe9, 0x62, 0x67, 0x7f, 0xae, 0x06, 0x7e,
+ 0x8b, 0xa7, 0x58, 0x11, 0x8b, 0x1d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x66, 0xc8, 0xfe, 0xad, 0xfa,
+ 0x1e, 0xf6, 0xfd, 0x48, 0x6e, 0x9f, 0x2c, 0x45, 0x98, 0x06, 0xc7, 0x06, 0x15, 0x5a, 0x04, 0x14,
+ 0xb6, 0x9a, 0xcd, 0x06, 0xf3, 0x4e, 0x73, 0x1a, 0x8c, 0x15, 0x77, 0xdb, 0x29, 0xf2, 0x10, 0xd1,
+ 0xd5, 0x14, 0x16, 0x67, 0x94, 0xa0, 0xe7, 0xe2, 0x86, 0x68, 0x6a, 0x2f, 0x6b, 0x2a, 0x37, 0x7b,
+ 0x55, 0x79, 0x3b, 0x25, 0x0e, 0x2d, 0x40, 0x7f, 0xb8, 0x17, 0xd6, 0xa2, 0x46, 0xd8, 0x2e, 0x21,
+ 0x5b, 0x95, 0x91, 0x68, 0xf9, 0x40, 0x79, 0x11, 0x2c, 0xcb, 0xa2, 0x1a, 0x4c, 0x08, 0x8e, 0x73,
+ 0x5b, 0x8e, 0xa7, 0xd2, 0x44, 0x71, 0x27, 0xfd, 0x2b, 0xf7, 0xf7, 0xcb, 0x13, 0xa2, 0x66, 0x1d,
+ 0x7d, 0xb0, 0x5f, 0xa6, 0x8b, 0x23, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, 0xab, 0xf9, 0x3b,
+ 0xcd, 0x4a, 0xe0, 0x6f, 0xb8, 0x0d, 0xd2, 0xce, 0x74, 0x58, 0x35, 0x28, 0xc5, 0xe4, 0x33, 0x60,
+ 0x38, 0xc1, 0xcd, 0xfe, 0x1c, 0x93, 0x1d, 0xab, 0xee, 0xa6, 0xe7, 0x44, 0xad, 0x80, 0xa0, 0x1d,
+ 0x18, 0x6e, 0xb2, 0xdd, 0x45, 0x24, 0x3e, 0x11, 0x73, 0xfd, 0xc5, 0x2e, 0xd5, 0x4f, 0x77, 0x58,
+ 0xea, 0x36, 0xc3, 0xd5, 0xad, 0xa2, 0xb3, 0xc3, 0x26, 0x77, 0xfb, 0x5f, 0x9c, 0x61, 0xd2, 0x47,
+ 0x95, 0xeb, 0x94, 0xfa, 0xc5, 0x9b, 0x20, 0x71, 0x8d, 0x9d, 0xca, 0x57, 0xb0, 0xc6, 0xc3, 0x22,
+ 0xde, 0x15, 0x61, 0x59, 0x16, 0x7d, 0x12, 0x46, 0xe8, 0xad, 0x50, 0x49, 0x00, 0xe1, 0xe4, 0x89,
+ 0xfc, 0xd8, 0x2d, 0x8a, 0x4a, 0x4f, 0x8a, 0xa4, 0x17, 0xc6, 0x09, 0x66, 0xe8, 0x0d, 0xe6, 0x5a,
+ 0x26, 0x59, 0x17, 0xba, 0x61, 0xad, 0x7b, 0x91, 0x49, 0xb6, 0x1a, 0x13, 0xd4, 0x82, 0x89, 0x74,
+ 0xea, 0xc7, 0x70, 0xd2, 0xce, 0x17, 0xaf, 0xd3, 0xd9, 0x1b, 0xe3, 0xec, 0x35, 0x69, 0x5c, 0x88,
+ 0xb3, 0xf8, 0xa3, 0xe5, 0x64, 0x62, 0xbe, 0xa2, 0xa1, 0xf7, 0x4d, 0x25, 0xe7, 0x1b, 0x6e, 0x9b,
+ 0x93, 0x6f, 0x13, 0xce, 0x69, 0xb9, 0xcd, 0xae, 0x06, 0x0e, 0x73, 0xde, 0x70, 0xd9, 0x76, 0xaa,
+ 0xc9, 0x45, 0x8f, 0xde, 0xdf, 0x2f, 0x9f, 0x5b, 0x6b, 0x47, 0x88, 0xdb, 0xf3, 0x41, 0x37, 0xe0,
+ 0x24, 0x8f, 0x3c, 0x30, 0x4f, 0x9c, 0x7a, 0xc3, 0xf5, 0x94, 0xe0, 0xc5, 0x97, 0xfc, 0x99, 0xfb,
+ 0xfb, 0xe5, 0x93, 0x33, 0x59, 0x04, 0x38, 0xbb, 0x1c, 0xfa, 0x30, 0x94, 0xea, 0x5e, 0x28, 0xfa,
+ 0xa0, 0xcf, 0x48, 0x1f, 0x57, 0x9a, 0x5f, 0xad, 0xaa, 0xef, 0x8f, 0xff, 0xe0, 0xb8, 0x00, 0xda,
+ 0xe4, 0xb6, 0x01, 0xa5, 0x2d, 0xea, 0x4f, 0x45, 0x5e, 0x4b, 0x2a, 0x54, 0x8d, 0xb7, 0xc7, 0xdc,
+ 0x28, 0xa6, 0x9e, 0xe4, 0x18, 0xcf, 0x92, 0x0d, 0xc6, 0xe8, 0x75, 0x40, 0x22, 0x4d, 0xc1, 0x4c,
+ 0x8d, 0x65, 0xd5, 0x61, 0x47, 0xe3, 0x80, 0xf9, 0x1a, 0xb6, 0x9a, 0xa2, 0xc0, 0x19, 0xa5, 0xd0,
+ 0x35, 0xba, 0xab, 0xe8, 0x50, 0xb1, 0x6b, 0xa9, 0x24, 0xa5, 0xf3, 0xa4, 0x19, 0x10, 0xe6, 0x63,
+ 0x66, 0x72, 0xc4, 0x89, 0x72, 0xa8, 0x0e, 0x67, 0x9d, 0x56, 0xe4, 0x33, 0xb3, 0x8b, 0x49, 0xba,
+ 0xe6, 0x6f, 0x13, 0x8f, 0x59, 0x3c, 0x07, 0x66, 0x2f, 0x50, 0xc9, 0x6e, 0xa6, 0x0d, 0x1d, 0x6e,
+ 0xcb, 0x85, 0x4a, 0xe4, 0x2a, 0x2b, 0x39, 0x98, 0xf1, 0xe4, 0x32, 0x32, 0x93, 0xbf, 0x04, 0x83,
+ 0x5b, 0x7e, 0x18, 0xad, 0x92, 0xe8, 0x8e, 0x1f, 0x6c, 0x8b, 0xb8, 0xc8, 0x71, 0x2c, 0xfa, 0x18,
+ 0x85, 0x75, 0x3a, 0x7a, 0xe5, 0x66, 0xfe, 0x38, 0x4b, 0xf3, 0xcc, 0x15, 0x62, 0x20, 0xde, 0x63,
+ 0xae, 0x71, 0x30, 0x96, 0x78, 0x49, 0xba, 0x54, 0x99, 0x63, 0x6e, 0x0d, 0x09, 0xd2, 0xa5, 0xca,
+ 0x1c, 0x96, 0x78, 0x3a, 0x5d, 0xc3, 0x2d, 0x27, 0x20, 0x95, 0xc0, 0xaf, 0x91, 0x50, 0xcb, 0x80,
+ 0xf0, 0x08, 0x8f, 0xfa, 0x4c, 0xa7, 0x6b, 0x35, 0x8b, 0x00, 0x67, 0x97, 0x43, 0x24, 0x9d, 0xd7,
+ 0x6f, 0x24, 0xdf, 0x1e, 0x95, 0x96, 0x67, 0xba, 0x4c, 0xed, 0xe7, 0xc1, 0x98, 0xca, 0x28, 0xc8,
+ 0xe3, 0x3c, 0x87, 0x93, 0xa3, 0x6c, 0x6e, 0x77, 0x1f, 0x24, 0x5a, 0x59, 0xf8, 0x96, 0x12, 0x9c,
+ 0x70, 0x8a, 0xb7, 0x11, 0x32, 0x70, 0xac, 0x63, 0xc8, 0xc0, 0xcb, 0x50, 0x0a, 0x5b, 0xeb, 0x75,
+ 0x7f, 0xc7, 0x71, 0x3d, 0xe6, 0xd6, 0xa0, 0xdd, 0xfd, 0xaa, 0x12, 0x81, 0x63, 0x1a, 0xb4, 0x08,
+ 0x03, 0x8e, 0x34, 0xdf, 0xa1, 0xfc, 0x20, 0x51, 0xca, 0x68, 0xc7, 0xe3, 0xa6, 0x48, 0x83, 0x9d,
+ 0x2a, 0x8b, 0x5e, 0x85, 0x61, 0xf1, 0x72, 0x5e, 0x24, 0xe1, 0x9d, 0x30, 0x9f, 0x37, 0x56, 0x75,
+ 0x24, 0x36, 0x69, 0xd1, 0x4d, 0x18, 0x8c, 0xfc, 0x06, 0x7b, 0xa3, 0x47, 0xc5, 0xbc, 0x53, 0xf9,
+ 0xe1, 0x0e, 0xd7, 0x14, 0x99, 0xae, 0xb5, 0x56, 0x45, 0xb1, 0xce, 0x07, 0xad, 0xf1, 0xf9, 0xce,
+ 0xf2, 0x1d, 0x90, 0x50, 0x64, 0x71, 0x3d, 0x97, 0xe7, 0x93, 0xc6, 0xc8, 0xcc, 0xe5, 0x20, 0x4a,
+ 0x62, 0x9d, 0x0d, 0xba, 0x0a, 0xe3, 0xcd, 0xc0, 0xf5, 0xd9, 0x9c, 0x50, 0x96, 0xdb, 0x49, 0x33,
+ 0xbb, 0x59, 0x25, 0x49, 0x80, 0xd3, 0x65, 0x58, 0xe0, 0x03, 0x01, 0x9c, 0x3c, 0xc3, 0x33, 0xb4,
+ 0xf0, 0xab, 0x34, 0x87, 0x61, 0x85, 0x45, 0x2b, 0x6c, 0x27, 0xe6, 0x5a, 0xa0, 0xc9, 0xa9, 0xfc,
+ 0xb8, 0x54, 0xba, 0xb6, 0x88, 0x0b, 0xaf, 0xea, 0x2f, 0x8e, 0x39, 0xa0, 0xba, 0x96, 0x18, 0x95,
+ 0x5e, 0x01, 0xc2, 0xc9, 0xb3, 0x6d, 0x9c, 0x22, 0x13, 0xb7, 0xb2, 0x58, 0x20, 0x30, 0xc0, 0x21,
+ 0x4e, 0xf0, 0x44, 0x1f, 0x85, 0x31, 0x11, 0x4d, 0x33, 0xee, 0xa6, 0x73, 0xf1, 0xcb, 0x07, 0x9c,
+ 0xc0, 0xe1, 0x14, 0x35, 0xcf, 0x90, 0xe2, 0xac, 0x37, 0x88, 0xd8, 0xfa, 0x96, 0x5d, 0x6f, 0x3b,
+ 0x9c, 0x3c, 0xcf, 0xf6, 0x07, 0x91, 0x21, 0x25, 0x89, 0xc5, 0x19, 0x25, 0xd0, 0x1a, 0x8c, 0x35,
+ 0x03, 0x42, 0x76, 0x98, 0xa0, 0x2f, 0xce, 0xb3, 0x32, 0x8f, 0xfb, 0x41, 0x5b, 0x52, 0x49, 0xe0,
+ 0x0e, 0x32, 0x60, 0x38, 0xc5, 0x01, 0xdd, 0x81, 0x01, 0x7f, 0x97, 0x04, 0x5b, 0xc4, 0xa9, 0x4f,
+ 0x5e, 0x68, 0xf3, 0x12, 0x47, 0x1c, 0x6e, 0x37, 0x04, 0x6d, 0xc2, 0xdb, 0x43, 0x82, 0x3b, 0x7b,
+ 0x7b, 0xc8, 0xca, 0xd0, 0xff, 0x61, 0xc1, 0x19, 0x69, 0x9c, 0xa9, 0x36, 0x69, 0xaf, 0xcf, 0xf9,
+ 0x5e, 0x18, 0x05, 0x3c, 0x52, 0xc5, 0xa3, 0xf9, 0xd1, 0x1b, 0xd6, 0x72, 0x0a, 0x29, 0x45, 0xf4,
+ 0x99, 0x3c, 0x8a, 0x10, 0xe7, 0xd7, 0x48, 0xaf, 0xa6, 0x21, 0x89, 0xe4, 0x66, 0x34, 0x13, 0x2e,
+ 0xbe, 0x31, 0xbf, 0x3a, 0xf9, 0x18, 0x0f, 0xb3, 0x41, 0x17, 0x43, 0x35, 0x89, 0xc4, 0x69, 0x7a,
+ 0x74, 0x05, 0x0a, 0x7e, 0x38, 0xf9, 0x78, 0x9b, 0x5c, 0xba, 0x7e, 0xfd, 0x46, 0x95, 0x7b, 0xfd,
+ 0xdd, 0xa8, 0xe2, 0x82, 0x1f, 0xca, 0x2c, 0x25, 0xf4, 0x3e, 0x16, 0x4e, 0x3e, 0xc1, 0xd5, 0x96,
+ 0x32, 0x4b, 0x09, 0x03, 0xe2, 0x18, 0x8f, 0xb6, 0x60, 0x34, 0x34, 0xee, 0xbd, 0xe1, 0xe4, 0x45,
+ 0xd6, 0x53, 0x4f, 0xe4, 0x0d, 0x9a, 0x41, 0xad, 0xa5, 0x0f, 0x30, 0xb9, 0xe0, 0x24, 0x5b, 0xbe,
+ 0xba, 0xb4, 0x9b, 0x77, 0x38, 0xf9, 0x64, 0x87, 0xd5, 0xa5, 0x11, 0xeb, 0xab, 0x4b, 0xe7, 0x81,
+ 0x13, 0x3c, 0xa7, 0xbe, 0x03, 0xc6, 0x53, 0xe2, 0xd2, 0x61, 0x3c, 0xdc, 0xa7, 0xb6, 0x61, 0xd8,
+ 0x98, 0x92, 0x0f, 0xd5, 0xbb, 0xe2, 0xb7, 0x4b, 0x50, 0x52, 0x56, 0x6f, 0x74, 0xd9, 0x74, 0xa8,
+ 0x38, 0x93, 0x74, 0xa8, 0x18, 0xa8, 0xf8, 0x75, 0xc3, 0x87, 0x62, 0x2d, 0x23, 0x18, 0x63, 0xde,
+ 0x06, 0xd8, 0xfd, 0x23, 0x15, 0xcd, 0x94, 0x50, 0xec, 0xda, 0x33, 0xa3, 0xa7, 0xad, 0x75, 0xe2,
+ 0x2a, 0x8c, 0x7b, 0x3e, 0x93, 0xd1, 0x49, 0x5d, 0x0a, 0x60, 0x4c, 0xce, 0x2a, 0xe9, 0xd1, 0x8d,
+ 0x12, 0x04, 0x38, 0x5d, 0x86, 0x56, 0xc8, 0x05, 0xa5, 0xa4, 0x39, 0x84, 0xcb, 0x51, 0x58, 0x60,
+ 0xe9, 0xdd, 0x90, 0xff, 0x0a, 0x27, 0xc7, 0xf2, 0xef, 0x86, 0xbc, 0x50, 0x52, 0x18, 0x0b, 0xa5,
+ 0x30, 0xc6, 0xb4, 0xff, 0x4d, 0xbf, 0xbe, 0x54, 0x11, 0x62, 0xbe, 0x16, 0x49, 0xb8, 0xbe, 0x54,
+ 0xc1, 0x1c, 0x87, 0x66, 0xa0, 0x8f, 0xfd, 0x08, 0x27, 0x87, 0xf2, 0xa3, 0xe1, 0xb0, 0x12, 0x5a,
+ 0x96, 0x34, 0x56, 0x00, 0x8b, 0x82, 0x4c, 0xbb, 0x4b, 0xef, 0x46, 0x4c, 0xbb, 0xdb, 0xff, 0x80,
+ 0xda, 0x5d, 0xc9, 0x00, 0xc7, 0xbc, 0xd0, 0x5d, 0x38, 0x69, 0xdc, 0x47, 0xd5, 0xab, 0x1d, 0xc8,
+ 0x37, 0xfc, 0x26, 0x88, 0x67, 0xcf, 0x89, 0x46, 0x9f, 0x5c, 0xca, 0xe2, 0x84, 0xb3, 0x2b, 0x40,
+ 0x0d, 0x18, 0xaf, 0xa5, 0x6a, 0x1d, 0xe8, 0xbe, 0x56, 0x35, 0x2f, 0xd2, 0x35, 0xa6, 0x19, 0xa3,
+ 0x57, 0x61, 0xe0, 0x6d, 0x3f, 0x64, 0x47, 0xa4, 0xb8, 0x9a, 0xc8, 0x70, 0x0e, 0x03, 0x6f, 0xdc,
+ 0xa8, 0x32, 0xf8, 0xc1, 0x7e, 0x79, 0xb0, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x0a, 0xa0, 0xef, 0xb7,
+ 0x60, 0x2a, 0x7d, 0xe1, 0x55, 0x8d, 0x1e, 0xee, 0xbe, 0xd1, 0xb6, 0xa8, 0x74, 0x6a, 0x21, 0x97,
+ 0x1d, 0x6e, 0x53, 0x15, 0xfa, 0x10, 0x5d, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x14, 0xb3, 0x8f, 0xc6,
+ 0xeb, 0x89, 0x42, 0x0f, 0xf6, 0xcb, 0xa3, 0x7c, 0x67, 0x74, 0xef, 0xc9, 0xe7, 0x4d, 0xa2, 0x00,
+ 0xfa, 0x4e, 0x38, 0x19, 0xa4, 0x35, 0xa8, 0x44, 0x0a, 0xe1, 0x4f, 0x77, 0xb3, 0xcb, 0x26, 0x07,
+ 0x1c, 0x67, 0x31, 0xc4, 0xd9, 0xf5, 0xd8, 0xbf, 0x62, 0x31, 0xfd, 0xb6, 0x68, 0x16, 0x09, 0x5b,
+ 0x8d, 0xe3, 0x48, 0x6c, 0xbd, 0x60, 0xd8, 0x8e, 0x1f, 0xd8, 0xb1, 0xe8, 0x1f, 0x59, 0xcc, 0xb1,
+ 0xe8, 0x18, 0x5f, 0x31, 0xbd, 0x01, 0x03, 0x91, 0x4c, 0x38, 0xde, 0x26, 0x17, 0xb7, 0xd6, 0x28,
+ 0xe6, 0x5c, 0xa5, 0x2e, 0x39, 0x2a, 0xb7, 0xb8, 0x62, 0x63, 0xff, 0x7d, 0x3e, 0x02, 0x12, 0x73,
+ 0x0c, 0x26, 0xba, 0x79, 0xd3, 0x44, 0x57, 0xee, 0xf0, 0x05, 0x39, 0xa6, 0xba, 0xbf, 0x67, 0xb6,
+ 0x9b, 0x29, 0xf7, 0xde, 0xed, 0x1e, 0x6d, 0xf6, 0x17, 0x2c, 0x80, 0x38, 0xc8, 0x7c, 0x17, 0x29,
+ 0x25, 0x5f, 0xa6, 0xd7, 0x1a, 0x3f, 0xf2, 0x6b, 0x7e, 0x43, 0x18, 0x28, 0xce, 0xc6, 0x56, 0x42,
+ 0x0e, 0x3f, 0xd0, 0x7e, 0x63, 0x45, 0x8d, 0xca, 0x32, 0xa4, 0x65, 0x31, 0xb6, 0x5b, 0x1b, 0xe1,
+ 0x2c, 0xbf, 0x64, 0xc1, 0x89, 0x2c, 0x97, 0x78, 0x7a, 0x49, 0xe6, 0x6a, 0x4e, 0xe5, 0x6d, 0xa8,
+ 0x46, 0xf3, 0x96, 0x80, 0x63, 0x45, 0xd1, 0x75, 0xae, 0xce, 0xc3, 0x45, 0x77, 0xbf, 0x01, 0xc3,
+ 0x95, 0x80, 0x68, 0xf2, 0xc5, 0x6b, 0x3c, 0x4c, 0x0a, 0x6f, 0xcf, 0xb3, 0x87, 0x0e, 0x91, 0x62,
+ 0x7f, 0xb9, 0x00, 0x27, 0xb8, 0xd3, 0xce, 0xcc, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x5d, 0x3c, 0x64,
+ 0x7c, 0x13, 0x86, 0x9a, 0x9a, 0x6e, 0xba, 0x5d, 0xa4, 0x62, 0x5d, 0x87, 0x1d, 0x6b, 0xd3, 0x74,
+ 0x28, 0x36, 0x78, 0xa1, 0x3a, 0x0c, 0x91, 0x5d, 0xb7, 0xa6, 0x3c, 0x3f, 0x0a, 0x87, 0x3e, 0xa4,
+ 0x55, 0x2d, 0x0b, 0x1a, 0x1f, 0x6c, 0x70, 0x7d, 0x08, 0x19, 0xf4, 0xed, 0x1f, 0xb5, 0xe0, 0x74,
+ 0x4e, 0x5c, 0x63, 0x5a, 0xdd, 0x1d, 0xe6, 0x1e, 0x25, 0xa6, 0xad, 0xaa, 0x8e, 0x3b, 0x4d, 0x61,
+ 0x81, 0x45, 0x1f, 0x03, 0xe0, 0x4e, 0x4f, 0xc4, 0xab, 0x75, 0x0c, 0x00, 0x6b, 0xc4, 0xae, 0xd4,
+ 0xc2, 0x10, 0xca, 0xf2, 0x58, 0xe3, 0x65, 0x7f, 0xa9, 0x07, 0x7a, 0x99, 0x93, 0x0d, 0xaa, 0x40,
+ 0xff, 0x16, 0xcf, 0x54, 0xd5, 0x76, 0xdc, 0x28, 0xad, 0x4c, 0x7e, 0x15, 0x8f, 0x9b, 0x06, 0xc5,
+ 0x92, 0x0d, 0x5a, 0x81, 0x09, 0x9e, 0x30, 0xac, 0x31, 0x4f, 0x1a, 0xce, 0x9e, 0x54, 0xfb, 0xf2,
+ 0x1c, 0xd8, 0x4a, 0xfd, 0xbd, 0x94, 0x26, 0xc1, 0x59, 0xe5, 0xd0, 0x6b, 0x30, 0x42, 0xaf, 0xe1,
+ 0x7e, 0x2b, 0x92, 0x9c, 0x78, 0xaa, 0x30, 0x75, 0x33, 0x59, 0x33, 0xb0, 0x38, 0x41, 0x8d, 0x5e,
+ 0x85, 0xe1, 0x66, 0x4a, 0xc1, 0xdd, 0x1b, 0x6b, 0x82, 0x4c, 0xa5, 0xb6, 0x49, 0xcb, 0xbc, 0xe2,
+ 0x5b, 0xec, 0x0d, 0xc0, 0xda, 0x56, 0x40, 0xc2, 0x2d, 0xbf, 0x51, 0x67, 0x12, 0x70, 0xaf, 0xe6,
+ 0x15, 0x9f, 0xc0, 0xe3, 0x54, 0x09, 0xca, 0x65, 0xc3, 0x71, 0x1b, 0xad, 0x80, 0xc4, 0x5c, 0xfa,
+ 0x4c, 0x2e, 0x8b, 0x09, 0x3c, 0x4e, 0x95, 0xe8, 0xac, 0xb9, 0xef, 0x3f, 0x1a, 0xcd, 0xbd, 0xfd,
+ 0xd3, 0x05, 0x30, 0x86, 0xf6, 0xdb, 0x37, 0x85, 0x19, 0xfd, 0xb2, 0xcd, 0xa0, 0x59, 0x13, 0x0e,
+ 0x65, 0x99, 0x5f, 0x16, 0xe7, 0x2f, 0xe6, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8a, 0xae, 0xf1, 0x93,
+ 0x95, 0xc0, 0xa7, 0x87, 0x9c, 0x0c, 0xa4, 0xa7, 0x1e, 0x9f, 0xf4, 0xcb, 0x20, 0x03, 0x6d, 0x42,
+ 0xce, 0x0a, 0xf7, 0x7c, 0xce, 0xc1, 0xf0, 0xbd, 0xaa, 0x8a, 0x68, 0x1f, 0x92, 0x0b, 0xba, 0x02,
+ 0x83, 0x22, 0x2f, 0x15, 0x7b, 0x23, 0xc1, 0x17, 0x13, 0xf3, 0x15, 0x9b, 0x8f, 0xc1, 0x58, 0xa7,
+ 0xb1, 0x7f, 0xa0, 0x00, 0x13, 0x19, 0x8f, 0xdc, 0xf8, 0x31, 0xb2, 0xe9, 0x86, 0x91, 0x4a, 0x91,
+ 0xac, 0x1d, 0x23, 0x1c, 0x8e, 0x15, 0x05, 0xdd, 0xab, 0xf8, 0x41, 0x95, 0x3c, 0x9c, 0xc4, 0x23,
+ 0x12, 0x81, 0x3d, 0x64, 0xb2, 0xe1, 0x0b, 0xd0, 0xd3, 0x0a, 0x89, 0x0c, 0x16, 0xad, 0x8e, 0x6d,
+ 0x66, 0xd6, 0x66, 0x18, 0x7a, 0x05, 0xdc, 0x54, 0x16, 0x62, 0xed, 0x0a, 0xc8, 0x6d, 0xc4, 0x1c,
+ 0x47, 0x1b, 0x17, 0x11, 0xcf, 0xf1, 0x22, 0x71, 0x51, 0x8c, 0xa3, 0x9e, 0x32, 0x28, 0x16, 0x58,
+ 0xfb, 0x8b, 0x45, 0x38, 0x93, 0xfb, 0xec, 0x95, 0x36, 0x7d, 0xc7, 0xf7, 0xdc, 0xc8, 0x57, 0x4e,
+ 0x78, 0x3c, 0xd2, 0x29, 0x69, 0x6e, 0xad, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x22, 0xf4, 0x32, 0xa5,
+ 0x78, 0x2a, 0x59, 0xf4, 0xec, 0x3c, 0x0f, 0x7d, 0xc7, 0xd1, 0x5d, 0xe7, 0xf7, 0x7f, 0x8c, 0x4a,
+ 0x30, 0x7e, 0x23, 0x79, 0xa0, 0xd0, 0xe6, 0xfa, 0x7e, 0x03, 0x33, 0x24, 0x7a, 0x42, 0xf4, 0x57,
+ 0xc2, 0xeb, 0x0c, 0x3b, 0x75, 0x3f, 0xd4, 0x3a, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0x7b, 0x81, 0xeb,
+ 0x6d, 0x26, 0xbd, 0x11, 0xaf, 0x73, 0x30, 0x96, 0x78, 0x33, 0x6f, 0x69, 0xff, 0x51, 0x27, 0xe6,
+ 0x1f, 0xe8, 0x28, 0x9e, 0xfc, 0x50, 0x11, 0x46, 0xf1, 0xec, 0xfc, 0x7b, 0x03, 0x71, 0x33, 0x3d,
+ 0x10, 0x47, 0x9d, 0x98, 0xbf, 0xf3, 0x68, 0xfc, 0xa2, 0x05, 0xa3, 0x2c, 0x3b, 0x96, 0x88, 0x59,
+ 0xe1, 0xfa, 0xde, 0x31, 0x5c, 0x05, 0x1e, 0x83, 0xde, 0x80, 0x56, 0x9a, 0xcc, 0x12, 0xcd, 0x5a,
+ 0x82, 0x39, 0x0e, 0x9d, 0x85, 0x1e, 0xd6, 0x04, 0x3a, 0x78, 0x43, 0x7c, 0x0b, 0x9e, 0x77, 0x22,
+ 0x07, 0x33, 0x28, 0x0b, 0xfc, 0x86, 0x49, 0xb3, 0xe1, 0xf2, 0x46, 0xc7, 0x2e, 0x0b, 0xef, 0x8e,
+ 0x80, 0x18, 0x99, 0x4d, 0x7b, 0x67, 0x81, 0xdf, 0xb2, 0x59, 0xb6, 0xbf, 0x66, 0xff, 0x79, 0x01,
+ 0xce, 0x67, 0x96, 0xeb, 0x3a, 0xf0, 0x5b, 0xfb, 0xd2, 0x0f, 0x33, 0xff, 0x51, 0xf1, 0x18, 0x7d,
+ 0xbd, 0x7b, 0xba, 0x95, 0xfe, 0x7b, 0xbb, 0x88, 0xc7, 0x96, 0xd9, 0x65, 0xef, 0x92, 0x78, 0x6c,
+ 0x99, 0x6d, 0xcb, 0x51, 0x13, 0xfc, 0x75, 0x21, 0xe7, 0x5b, 0x98, 0xc2, 0xe0, 0x12, 0xdd, 0x67,
+ 0x18, 0x32, 0x94, 0x97, 0x70, 0xbe, 0xc7, 0x70, 0x18, 0x56, 0x58, 0x34, 0x03, 0xa3, 0x3b, 0xae,
+ 0x47, 0x37, 0x9f, 0x3d, 0x53, 0x14, 0x57, 0xb6, 0x8c, 0x15, 0x13, 0x8d, 0x93, 0xf4, 0xc8, 0xd5,
+ 0x62, 0xb5, 0xf1, 0xaf, 0x7b, 0xf5, 0x50, 0xab, 0x6e, 0xda, 0x74, 0xe7, 0x50, 0xbd, 0x98, 0x11,
+ 0xb7, 0x6d, 0x45, 0xd3, 0x13, 0x15, 0xbb, 0xd7, 0x13, 0x0d, 0x65, 0xeb, 0x88, 0xa6, 0x5e, 0x85,
+ 0xe1, 0x07, 0xb6, 0x8d, 0xd8, 0x5f, 0x2f, 0xc2, 0x23, 0x6d, 0x96, 0x3d, 0xdf, 0xeb, 0x8d, 0x31,
+ 0xd0, 0xf6, 0xfa, 0xd4, 0x38, 0x54, 0xe0, 0xc4, 0x46, 0xab, 0xd1, 0xd8, 0x63, 0x4f, 0xa0, 0x48,
+ 0x5d, 0x52, 0x08, 0x99, 0x52, 0x2a, 0x47, 0x4e, 0x2c, 0x66, 0xd0, 0xe0, 0xcc, 0x92, 0xf4, 0x8a,
+ 0x45, 0x4f, 0x92, 0x3d, 0xc5, 0x2a, 0x71, 0xc5, 0xc2, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x2a, 0x8c,
+ 0x3b, 0xbb, 0x8e, 0xcb, 0x03, 0xde, 0x4b, 0x06, 0xfc, 0x8e, 0xa5, 0x74, 0xd1, 0x33, 0x49, 0x02,
+ 0x9c, 0x2e, 0x83, 0x5e, 0x07, 0xe4, 0xaf, 0xb3, 0x87, 0x12, 0xf5, 0xab, 0xc4, 0x13, 0x56, 0x77,
+ 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x91, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0x81, 0xc9, 0xfa, 0xf2,
+ 0x03, 0x93, 0xb5, 0xdf, 0x17, 0x3b, 0xa6, 0xde, 0xba, 0x02, 0xc3, 0x87, 0x74, 0xff, 0xb5, 0xff,
+ 0x8d, 0x05, 0x4a, 0x41, 0x6c, 0x46, 0xfd, 0x7d, 0x95, 0xf9, 0x27, 0x73, 0xd5, 0xb6, 0x16, 0x2d,
+ 0xe9, 0xa4, 0xe6, 0x9f, 0x1c, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0x57, 0x6c, 0xdc, 0x0a,
+ 0x44, 0x68, 0x42, 0x45, 0x81, 0x3e, 0x0e, 0xfd, 0x75, 0x77, 0xd7, 0x0d, 0x85, 0x72, 0xec, 0xd0,
+ 0xc6, 0xb8, 0x78, 0xeb, 0x9c, 0xe7, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x50, 0x21, 0xee, 0x93, 0x37,
+ 0x5a, 0x7e, 0xe4, 0x1c, 0xc3, 0x49, 0x7e, 0xd5, 0x38, 0xc9, 0x9f, 0x68, 0x17, 0x9f, 0x91, 0x35,
+ 0x29, 0xf7, 0x04, 0xbf, 0x91, 0x38, 0xc1, 0x9f, 0xec, 0xcc, 0xaa, 0xfd, 0xc9, 0xfd, 0x0f, 0x2c,
+ 0x18, 0x37, 0xe8, 0x8f, 0xe1, 0x00, 0x59, 0x34, 0x0f, 0x90, 0x47, 0x3b, 0x7e, 0x43, 0xce, 0xc1,
+ 0xf1, 0xbd, 0xc5, 0x44, 0xdb, 0xd9, 0x81, 0xf1, 0x36, 0xf4, 0x6c, 0x39, 0x41, 0xbd, 0x5d, 0x3e,
+ 0x9a, 0x54, 0xa1, 0xe9, 0x6b, 0x4e, 0x20, 0x3c, 0x15, 0x9e, 0x95, 0xbd, 0x4e, 0x41, 0x1d, 0xbd,
+ 0x14, 0x58, 0x55, 0xe8, 0x65, 0xe8, 0x0b, 0x6b, 0x7e, 0x53, 0xbd, 0x99, 0xba, 0xc0, 0x3a, 0x9a,
+ 0x41, 0x0e, 0xf6, 0xcb, 0xc8, 0xac, 0x8e, 0x82, 0xb1, 0xa0, 0x47, 0x6f, 0xc2, 0x30, 0xfb, 0xa5,
+ 0xdc, 0x06, 0x8b, 0xf9, 0x1a, 0x8c, 0xaa, 0x4e, 0xc8, 0x7d, 0x6a, 0x0d, 0x10, 0x36, 0x59, 0x4d,
+ 0x6d, 0x42, 0x49, 0x7d, 0xd6, 0x43, 0xb5, 0x76, 0xff, 0xab, 0x22, 0x4c, 0x64, 0xcc, 0x39, 0x14,
+ 0x1a, 0x23, 0x71, 0xa5, 0xcb, 0xa9, 0xfa, 0x0e, 0xc7, 0x22, 0x64, 0x17, 0xa8, 0xba, 0x98, 0x5b,
+ 0x5d, 0x57, 0x7a, 0x33, 0x24, 0xc9, 0x4a, 0x29, 0xa8, 0x73, 0xa5, 0xb4, 0xb2, 0x63, 0xeb, 0x6a,
+ 0x5a, 0x91, 0x6a, 0xe9, 0x43, 0x1d, 0xd3, 0x5f, 0xef, 0x81, 0x13, 0x59, 0x21, 0x63, 0xd1, 0x67,
+ 0x13, 0xd9, 0x90, 0x5f, 0xec, 0x36, 0xd8, 0x2c, 0x4f, 0x91, 0x2c, 0xc2, 0x40, 0x4e, 0x9b, 0xf9,
+ 0x91, 0x3b, 0x76, 0xb3, 0xa8, 0x93, 0x05, 0xa0, 0x09, 0x78, 0x16, 0x6b, 0xb9, 0x7d, 0x7c, 0xa0,
+ 0xeb, 0x06, 0x88, 0xf4, 0xd7, 0x61, 0xc2, 0x25, 0x49, 0x82, 0x3b, 0xbb, 0x24, 0xc9, 0x9a, 0xd1,
+ 0x12, 0xf4, 0xd5, 0xb8, 0xaf, 0x4b, 0xb1, 0xf3, 0x16, 0xc6, 0x1d, 0x5d, 0xd4, 0x06, 0x2c, 0x1c,
+ 0x5c, 0x04, 0x83, 0x29, 0x17, 0x06, 0xb5, 0x8e, 0x79, 0xa8, 0x93, 0x67, 0x9b, 0x1e, 0x7c, 0x5a,
+ 0x17, 0x3c, 0xd4, 0x09, 0xf4, 0xa3, 0x16, 0x24, 0x1e, 0xbc, 0x28, 0xa5, 0x9c, 0x95, 0xab, 0x94,
+ 0xbb, 0x00, 0x3d, 0x81, 0xdf, 0x20, 0xc9, 0x0c, 0xc4, 0xd8, 0x6f, 0x10, 0xcc, 0x30, 0x94, 0x22,
+ 0x8a, 0x55, 0x2d, 0x43, 0xfa, 0x35, 0x52, 0x5c, 0x10, 0x1f, 0x83, 0xde, 0x06, 0xd9, 0x25, 0x8d,
+ 0x64, 0xa2, 0xb8, 0x65, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x62, 0x0f, 0x9c, 0x6b, 0x1b, 0x0d, 0x8a,
+ 0x5e, 0xc6, 0x36, 0x9d, 0x88, 0xdc, 0x71, 0xf6, 0x92, 0x19, 0x9d, 0xae, 0x72, 0x30, 0x96, 0x78,
+ 0xf6, 0xfc, 0x93, 0x27, 0x66, 0x48, 0xa8, 0x30, 0x45, 0x3e, 0x06, 0x81, 0x35, 0x55, 0x62, 0xc5,
+ 0xa3, 0x50, 0x89, 0x3d, 0x0f, 0x10, 0x86, 0x0d, 0xee, 0x16, 0x58, 0x17, 0xef, 0x4a, 0xe3, 0x04,
+ 0x1e, 0xd5, 0x65, 0x81, 0xc1, 0x1a, 0x15, 0x9a, 0x87, 0xb1, 0x66, 0xe0, 0x47, 0x5c, 0x23, 0x3c,
+ 0xcf, 0x3d, 0x67, 0x7b, 0xcd, 0x40, 0x3c, 0x95, 0x04, 0x1e, 0xa7, 0x4a, 0xa0, 0x97, 0x60, 0x50,
+ 0x04, 0xe7, 0xa9, 0xf8, 0x7e, 0x43, 0x28, 0xa1, 0x94, 0x33, 0x69, 0x35, 0x46, 0x61, 0x9d, 0x4e,
+ 0x2b, 0xc6, 0xd4, 0xcc, 0xfd, 0x99, 0xc5, 0xb8, 0xaa, 0x59, 0xa3, 0x4b, 0x44, 0xa2, 0x1e, 0xe8,
+ 0x2a, 0x12, 0x75, 0xac, 0x96, 0x2b, 0x75, 0x6d, 0xf5, 0x84, 0x8e, 0x8a, 0xac, 0xaf, 0xf4, 0xc0,
+ 0x84, 0x98, 0x38, 0x0f, 0x7b, 0xba, 0xdc, 0x4c, 0x4f, 0x97, 0xa3, 0x50, 0xdc, 0xbd, 0x37, 0x67,
+ 0x8e, 0x7b, 0xce, 0xfc, 0xb0, 0x05, 0xa6, 0xa4, 0x86, 0xfe, 0xb7, 0xdc, 0x94, 0x78, 0x2f, 0xe5,
+ 0x4a, 0x7e, 0x71, 0x94, 0xdf, 0x77, 0x96, 0x1c, 0xcf, 0xfe, 0xd7, 0x16, 0x3c, 0xda, 0x91, 0x23,
+ 0x5a, 0x80, 0x12, 0x13, 0x27, 0xb5, 0x8b, 0xde, 0x93, 0xca, 0xb3, 0x5e, 0x22, 0x72, 0xa4, 0xdb,
+ 0xb8, 0x24, 0x5a, 0x48, 0xe5, 0x1e, 0x7c, 0x2a, 0x23, 0xf7, 0xe0, 0x49, 0xa3, 0x7b, 0x1e, 0x30,
+ 0xf9, 0xe0, 0x0f, 0xd2, 0x13, 0xc7, 0x78, 0xd5, 0x86, 0x3e, 0x60, 0x28, 0x1d, 0xed, 0x84, 0xd2,
+ 0x11, 0x99, 0xd4, 0xda, 0x19, 0xf2, 0x51, 0x18, 0x63, 0x51, 0xfb, 0xd8, 0x3b, 0x0f, 0xf1, 0xde,
+ 0xae, 0x10, 0xfb, 0x72, 0x2f, 0x27, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x69, 0x11, 0xfa, 0xf8, 0xf2,
+ 0x3b, 0x86, 0xeb, 0xe5, 0x33, 0x50, 0x72, 0x77, 0x76, 0x5a, 0x3c, 0x9d, 0x5c, 0x6f, 0xec, 0x19,
+ 0xbc, 0x24, 0x81, 0x38, 0xc6, 0xa3, 0x45, 0xa1, 0xef, 0x6e, 0x13, 0x18, 0x98, 0x37, 0x7c, 0x7a,
+ 0xde, 0x89, 0x1c, 0x2e, 0x2b, 0xa9, 0x73, 0x36, 0xd6, 0x8c, 0xa3, 0x4f, 0x01, 0x84, 0x51, 0xe0,
+ 0x7a, 0x9b, 0x14, 0x26, 0x62, 0xab, 0x3f, 0xdd, 0x86, 0x5b, 0x55, 0x11, 0x73, 0x9e, 0xf1, 0x9e,
+ 0xa3, 0x10, 0x58, 0xe3, 0x88, 0xa6, 0x8d, 0x93, 0x7e, 0x2a, 0x31, 0x76, 0xc0, 0xb9, 0xc6, 0x63,
+ 0x36, 0xf5, 0x41, 0x28, 0x29, 0xe6, 0x9d, 0xb4, 0x5f, 0x43, 0xba, 0x58, 0xf4, 0x11, 0x18, 0x4d,
+ 0xb4, 0xed, 0x50, 0xca, 0xb3, 0x5f, 0xb2, 0x60, 0x94, 0x37, 0x66, 0xc1, 0xdb, 0x15, 0xa7, 0xc1,
+ 0x3d, 0x38, 0xd1, 0xc8, 0xd8, 0x95, 0xc5, 0xf0, 0x77, 0xbf, 0x8b, 0x2b, 0x65, 0x59, 0x16, 0x16,
+ 0x67, 0xd6, 0x81, 0x2e, 0xd1, 0x15, 0x47, 0x77, 0x5d, 0xa7, 0x21, 0xe2, 0x1b, 0x0c, 0xf1, 0xd5,
+ 0xc6, 0x61, 0x58, 0x61, 0xed, 0x3f, 0xb0, 0x60, 0x9c, 0xb7, 0xfc, 0x3a, 0xd9, 0x53, 0x7b, 0xd3,
+ 0x37, 0xb3, 0xed, 0x22, 0x91, 0x69, 0x21, 0x27, 0x91, 0xa9, 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6,
+ 0x65, 0x0b, 0xc4, 0x0c, 0x39, 0x06, 0x7d, 0xc6, 0x77, 0x98, 0xfa, 0x8c, 0xa9, 0xfc, 0x45, 0x90,
+ 0xa3, 0xc8, 0xf8, 0x2b, 0x0b, 0xc6, 0x38, 0x41, 0x6c, 0xab, 0xff, 0xa6, 0x8e, 0xc3, 0xac, 0xf9,
+ 0x45, 0x99, 0xce, 0x97, 0xd7, 0xc9, 0xde, 0x9a, 0x5f, 0x71, 0xa2, 0xad, 0xec, 0x8f, 0x32, 0x06,
+ 0xab, 0xa7, 0xed, 0x60, 0xd5, 0xe5, 0x02, 0x32, 0xf2, 0x7c, 0x75, 0x08, 0x10, 0x70, 0xd8, 0x3c,
+ 0x5f, 0xf6, 0x9f, 0x59, 0x80, 0x78, 0x35, 0x86, 0xe0, 0x46, 0xc5, 0x21, 0x06, 0xd5, 0x0e, 0xba,
+ 0x78, 0x6b, 0x52, 0x18, 0xac, 0x51, 0x1d, 0x49, 0xf7, 0x24, 0x1c, 0x2e, 0x8a, 0x9d, 0x1d, 0x2e,
+ 0x0e, 0xd1, 0xa3, 0xff, 0xac, 0x0f, 0x92, 0x2f, 0xfb, 0xd0, 0x2d, 0x18, 0xaa, 0x39, 0x4d, 0x67,
+ 0xdd, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x76, 0xde, 0x58, 0x73, 0x1a, 0x9d, 0x30, 0x91, 0x6b, 0x10,
+ 0x6c, 0xf0, 0x41, 0xd3, 0x00, 0xcd, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x93, 0xa9, 0x5d, 0x58, 0x44,
+ 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc8, 0x61, 0x14, 0xe0,
+ 0xd8, 0xc2, 0x28, 0xf4, 0x1c, 0x2a, 0x8c, 0xc2, 0xc0, 0xa1, 0xc3, 0x28, 0xf4, 0x76, 0x15, 0x46,
+ 0x01, 0xc3, 0x29, 0x29, 0x7b, 0xd2, 0xff, 0x8b, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, 0x33,
+ 0x75, 0x7f, 0xbf, 0x7c, 0x0a, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0x63, 0x30, 0xe9, 0x34, 0x1a,
+ 0xfe, 0x1d, 0x35, 0xa8, 0x0b, 0x61, 0xcd, 0x69, 0x70, 0x13, 0x48, 0x3f, 0xe3, 0x7a, 0xf6, 0xfe,
+ 0x7e, 0x79, 0x72, 0x26, 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x86, 0x52, 0x33, 0xf0, 0x6b, 0x2b,
+ 0xda, 0xf3, 0xe3, 0xf3, 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x56, 0x7f, 0xd8, 0x81,
+ 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x78, 0xa4, 0x71, 0x11, 0xb6, 0x61, 0xa2, 0x4a, 0x02, 0xd7,
+ 0x69, 0xb8, 0xf7, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x35, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xab, 0x60,
+ 0xbd, 0x5a, 0xc2, 0x25, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0x41, 0xbf, 0x78, 0xc9, 0x77,
+ 0x0c, 0x52, 0xe3, 0x8c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x29,
+ 0x61, 0x8e, 0x78, 0xb4, 0x1d, 0x93, 0xf6, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, 0x4d,
+ 0xf9, 0xc3, 0xef, 0x82, 0x55, 0xe8, 0x0f, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, 0x20,
+ 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x10, 0x1f, 0x4b, 0x77,
+ 0x7a, 0x75, 0xdf, 0x73, 0x14, 0xaf, 0xee, 0xed, 0xaf, 0xb1, 0x93, 0x53, 0x87, 0x1f, 0x83, 0x50,
+ 0x75, 0xd5, 0x3c, 0x63, 0xed, 0x36, 0x33, 0x4b, 0x34, 0x2a, 0x47, 0xb8, 0xfa, 0x05, 0x0b, 0xce,
+ 0x65, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x0b, 0x03, 0x4e, 0xab, 0xee, 0xaa, 0xb5, 0xac, 0x99, 0x26,
+ 0x67, 0x04, 0x1c, 0x2b, 0x0a, 0x34, 0x07, 0xe3, 0xe4, 0x6e, 0xd3, 0xe5, 0x86, 0x5c, 0xdd, 0xf9,
+ 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x24, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, 0x44,
+ 0xfd, 0xbc, 0x05, 0x83, 0xea, 0x55, 0xef, 0x43, 0xef, 0xed, 0x8f, 0x9a, 0xbd, 0xfd, 0x48, 0x9b,
+ 0xde, 0xce, 0xe9, 0xe6, 0xdf, 0x2b, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x42, 0x82, 0x7b, 0xf0,
+ 0x87, 0x13, 0x57, 0x60, 0xd0, 0x69, 0x36, 0x25, 0x42, 0x7a, 0xc0, 0xb1, 0xd0, 0xeb, 0x31, 0x18,
+ 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x4d, 0x12, 0x51,
+ 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0x5a, 0x91, 0xdb, 0x98, 0x76, 0xbd, 0x28, 0x8c, 0x82, 0xe9,
+ 0x25, 0x2f, 0xba, 0x11, 0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xc1,
+ 0x82, 0xd5, 0xd1, 0x6b, 0xba, 0x52, 0xac, 0x0a, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, 0xc3,
+ 0xfa, 0xf4, 0x70, 0xe1, 0xc5, 0x7e, 0x72, 0x48, 0x8d, 0x06, 0x33, 0x8a, 0xce, 0xeb, 0x41, 0xcc,
+ 0xda, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x22, 0xe5, 0x1e, 0xf3,
+ 0x5c, 0x87, 0x53, 0xe3, 0x10, 0x0e, 0x31, 0x2c, 0x0f, 0x13, 0xcb, 0x52, 0xb3, 0x54, 0x11, 0xeb,
+ 0x42, 0xcb, 0xc3, 0x24, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa2, 0x38, 0x16,
+ 0xb0, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x0b, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x92, 0x50, 0x28,
+ 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x57, 0x60, 0x90, 0xdc, 0x8d, 0x48, 0xe0, 0x39, 0x0d, 0x5a, 0x43,
+ 0x6f, 0x1c, 0x3f, 0x73, 0x21, 0x06, 0x63, 0x9d, 0x06, 0xad, 0xc1, 0x68, 0xc8, 0xf5, 0x6c, 0x2a,
+ 0x48, 0x3c, 0xd7, 0x57, 0x3e, 0xad, 0xde, 0x53, 0x9b, 0xe8, 0x03, 0x06, 0xe2, 0xbb, 0x93, 0x8c,
+ 0x32, 0x91, 0x64, 0x81, 0x5e, 0x83, 0x91, 0x86, 0xef, 0xd4, 0x67, 0x9d, 0x86, 0xe3, 0xd5, 0x58,
+ 0xff, 0x0c, 0x98, 0x89, 0xa8, 0x97, 0x0d, 0x2c, 0x4e, 0x50, 0x53, 0xe1, 0x4d, 0x87, 0x88, 0x30,
+ 0x6d, 0x8e, 0xb7, 0x49, 0x42, 0x91, 0x0f, 0x9e, 0x09, 0x6f, 0xcb, 0x39, 0x34, 0x38, 0xb7, 0x34,
+ 0x7a, 0x19, 0x86, 0xe4, 0xe7, 0x6b, 0x41, 0x59, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28,
+ 0x84, 0x93, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x35, 0x11, 0xa9, 0x80, 0x3f, 0x1f, 0xfe,
+ 0x88, 0x7c, 0xab, 0xb8, 0x90, 0x45, 0x74, 0xb0, 0x5f, 0x3e, 0x2b, 0x7a, 0x2d, 0x13, 0x8f, 0xb3,
+ 0x79, 0xa3, 0x15, 0x98, 0xd8, 0x22, 0x4e, 0x23, 0xda, 0x9a, 0xdb, 0x22, 0xb5, 0x6d, 0xb9, 0xe0,
+ 0x58, 0x98, 0x17, 0xed, 0xe9, 0xc8, 0xb5, 0x34, 0x09, 0xce, 0x2a, 0x87, 0xde, 0x82, 0xc9, 0x66,
+ 0x6b, 0xbd, 0xe1, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x9c, 0x90, 0x66, 0xea, 0xf5, 0x80, 0x84, 0xfc,
+ 0x75, 0x29, 0x3b, 0x7a, 0x65, 0x20, 0x9d, 0x4a, 0x0e, 0x1d, 0xce, 0xe5, 0x80, 0xee, 0xc1, 0xc9,
+ 0xc4, 0x44, 0x10, 0x11, 0x31, 0x46, 0xf2, 0x53, 0xc4, 0x54, 0xb3, 0x0a, 0x88, 0xe0, 0x32, 0x59,
+ 0x28, 0x9c, 0x5d, 0x05, 0x7a, 0x05, 0xc0, 0x6d, 0x2e, 0x3a, 0x3b, 0x6e, 0x83, 0x5e, 0x15, 0x27,
+ 0xd8, 0x1c, 0xa1, 0xd7, 0x06, 0x58, 0xaa, 0x48, 0x28, 0xdd, 0x9b, 0xc5, 0xbf, 0x3d, 0xac, 0x51,
+ 0xa3, 0x65, 0x18, 0x11, 0xff, 0xf6, 0xc4, 0x90, 0xf2, 0xc0, 0x2c, 0x8f, 0xb3, 0xa8, 0x5a, 0x15,
+ 0x1d, 0x73, 0x90, 0x82, 0xe0, 0x44, 0x59, 0xb4, 0x09, 0xe7, 0x64, 0xa2, 0x3f, 0x7d, 0x7e, 0xca,
+ 0x31, 0x08, 0x59, 0x5e, 0x96, 0x01, 0xfe, 0x2a, 0x65, 0xa6, 0x1d, 0x21, 0x6e, 0xcf, 0x87, 0x9e,
+ 0xeb, 0xfa, 0x34, 0xe7, 0x6f, 0x8e, 0x4f, 0xc6, 0x11, 0x07, 0x97, 0x93, 0x48, 0x9c, 0xa6, 0x47,
+ 0x3e, 0x9c, 0x74, 0xbd, 0xac, 0x59, 0x7d, 0x8a, 0x31, 0xfa, 0x10, 0x7f, 0x6e, 0xdd, 0x7e, 0x46,
+ 0x67, 0xe2, 0x71, 0x36, 0xdf, 0x77, 0xe6, 0xf7, 0xf7, 0xfb, 0x16, 0x2d, 0xad, 0x49, 0xe7, 0xe8,
+ 0xd3, 0x30, 0xa4, 0x7f, 0x94, 0x90, 0x34, 0x2e, 0x66, 0x0b, 0xaf, 0xda, 0x9e, 0xc0, 0x65, 0x7b,
+ 0xb5, 0xee, 0x75, 0x1c, 0x36, 0x38, 0xa2, 0x5a, 0x46, 0x6c, 0x83, 0xcb, 0xdd, 0x49, 0x32, 0xdd,
+ 0xbb, 0xbd, 0x11, 0xc8, 0x9e, 0xee, 0x68, 0x19, 0x06, 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x52, 0xa5,
+ 0x5d, 0xf4, 0xc6, 0x39, 0x41, 0x23, 0xd6, 0x8f, 0x48, 0xb1, 0xc2, 0x61, 0x58, 0x71, 0xb0, 0x7f,
+ 0xb3, 0x00, 0xe5, 0x0e, 0xf9, 0x7a, 0x12, 0x66, 0x28, 0xab, 0x2b, 0x33, 0xd4, 0x0c, 0x8c, 0xc6,
+ 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x65, 0xa2, 0x71, 0x92, 0xbe, 0xeb, 0x47, 0x09, 0xba,
+ 0x25, 0xab, 0xa7, 0xe3, 0xb3, 0x1a, 0xc3, 0x82, 0xdd, 0xdb, 0xfd, 0xb5, 0x37, 0xd7, 0x1a, 0x69,
+ 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xfb, 0x76, 0xdc, 0xcd, 0x74, 0xc7, 0x1d, 0x81, 0x2d,
+ 0xd7, 0xbe, 0x01, 0x7d, 0x3c, 0x1c, 0x65, 0x17, 0xe2, 0xf6, 0x63, 0x66, 0x94, 0x6c, 0x25, 0xe1,
+ 0x19, 0x91, 0xb2, 0xbf, 0xdf, 0x82, 0xd1, 0xc4, 0xeb, 0x36, 0x84, 0xb5, 0x27, 0xd0, 0x0f, 0x22,
+ 0x12, 0x67, 0x09, 0xdb, 0x17, 0xa0, 0x67, 0xcb, 0x0f, 0xa3, 0xa4, 0xa3, 0xc7, 0x35, 0x3f, 0x8c,
+ 0x30, 0xc3, 0xd8, 0x7f, 0x68, 0x41, 0xef, 0x9a, 0xe3, 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46,
+ 0x81, 0x6e, 0xbe, 0x0b, 0xbd, 0x04, 0x7d, 0x64, 0x63, 0x83, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0x14,
+ 0x42, 0xdf, 0x02, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6, 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x43, 0x29,
+ 0x72, 0x77, 0xc8, 0x4c, 0xbd, 0x2e, 0x4c, 0xe5, 0x0f, 0x10, 0xbf, 0x63, 0x4d, 0x32, 0xc0, 0x31,
+ 0x2f, 0xfb, 0x8b, 0x05, 0x80, 0x38, 0x8e, 0x57, 0xa7, 0x4f, 0x9c, 0x4d, 0x19, 0x51, 0x2f, 0x66,
+ 0x18, 0x51, 0x51, 0xcc, 0x30, 0xc3, 0x82, 0xaa, 0xba, 0xa9, 0xd8, 0x55, 0x37, 0xf5, 0x1c, 0xa6,
+ 0x9b, 0xe6, 0x60, 0x3c, 0x8e, 0x43, 0x66, 0x86, 0x61, 0x64, 0x47, 0xe7, 0x5a, 0x12, 0x89, 0xd3,
+ 0xf4, 0x36, 0x81, 0x0b, 0x2a, 0x1c, 0x93, 0x38, 0xd1, 0x98, 0x1f, 0xb8, 0x6e, 0x94, 0xee, 0xd0,
+ 0x4f, 0xb1, 0x95, 0xb8, 0x90, 0x6b, 0x25, 0xfe, 0x09, 0x0b, 0x4e, 0x24, 0xeb, 0x61, 0x8f, 0xa6,
+ 0xbf, 0x60, 0xc1, 0x49, 0x66, 0x2b, 0x67, 0xb5, 0xa6, 0x2d, 0xf3, 0x2f, 0xb6, 0x0d, 0x31, 0x95,
+ 0xd3, 0xe2, 0x38, 0xe6, 0xc6, 0x4a, 0x16, 0x6b, 0x9c, 0x5d, 0xa3, 0xfd, 0x5f, 0x7b, 0x60, 0x32,
+ 0x2f, 0x36, 0x15, 0x7b, 0x26, 0xe2, 0xdc, 0xad, 0x6e, 0x93, 0x3b, 0xc2, 0x19, 0x3f, 0x7e, 0x26,
+ 0xc2, 0xc1, 0x58, 0xe2, 0x93, 0xe9, 0x4f, 0x0a, 0x5d, 0xa6, 0x3f, 0xd9, 0x82, 0xf1, 0x3b, 0x5b,
+ 0xc4, 0xbb, 0xe9, 0x85, 0x4e, 0xe4, 0x86, 0x1b, 0x2e, 0xb3, 0x2b, 0xf3, 0x79, 0x23, 0x73, 0x50,
+ 0x8f, 0xdf, 0x4e, 0x12, 0x1c, 0xec, 0x97, 0xcf, 0x19, 0x80, 0xb8, 0xc9, 0x7c, 0x23, 0xc1, 0x69,
+ 0xa6, 0xe9, 0xec, 0x31, 0x3d, 0x0f, 0x39, 0x7b, 0xcc, 0x8e, 0x2b, 0xbc, 0x51, 0xe4, 0x1b, 0x00,
+ 0x76, 0x63, 0x5c, 0x51, 0x50, 0xac, 0x51, 0xa0, 0x4f, 0x02, 0xd2, 0x33, 0x74, 0x19, 0xa1, 0x41,
+ 0x9f, 0xbb, 0xbf, 0x5f, 0x46, 0xab, 0x29, 0xec, 0xc1, 0x7e, 0x79, 0x82, 0x42, 0x97, 0x3c, 0x7a,
+ 0xf3, 0x8c, 0xe3, 0xa9, 0x65, 0x30, 0x42, 0xb7, 0x61, 0x8c, 0x42, 0xd9, 0x8a, 0x92, 0x71, 0x47,
+ 0xf9, 0x6d, 0xf1, 0x99, 0xfb, 0xfb, 0xe5, 0xb1, 0xd5, 0x04, 0x2e, 0x8f, 0x75, 0x8a, 0x09, 0x7a,
+ 0x05, 0x46, 0xe2, 0x79, 0x75, 0x9d, 0xec, 0xf1, 0x00, 0x3d, 0x25, 0xae, 0xf0, 0x5e, 0x31, 0x30,
+ 0x38, 0x41, 0x69, 0x7f, 0xc1, 0x82, 0x33, 0xb9, 0x19, 0xf1, 0xd1, 0x25, 0x18, 0x70, 0x9a, 0x2e,
+ 0x37, 0x5f, 0x88, 0xa3, 0x86, 0xa9, 0xc9, 0x2a, 0x4b, 0xdc, 0x78, 0xa1, 0xb0, 0x74, 0x87, 0xdf,
+ 0x76, 0xbd, 0x7a, 0x72, 0x87, 0xbf, 0xee, 0x7a, 0x75, 0xcc, 0x30, 0xea, 0xc8, 0x2a, 0xe6, 0x3e,
+ 0x45, 0xf8, 0x0a, 0x5d, 0xab, 0x19, 0xb9, 0xf3, 0x8f, 0xb7, 0x19, 0xe8, 0x19, 0xdd, 0xd4, 0x28,
+ 0xbc, 0x0a, 0x73, 0xcd, 0x8c, 0xdf, 0x67, 0x81, 0x78, 0xba, 0xdc, 0xc5, 0x99, 0xfc, 0x26, 0x0c,
+ 0xed, 0xa6, 0xb3, 0x17, 0x5e, 0xc8, 0x7f, 0xcb, 0x2d, 0x22, 0xae, 0x2b, 0x41, 0xdb, 0xc8, 0x54,
+ 0x68, 0xf0, 0xb2, 0xeb, 0x20, 0xb0, 0xf3, 0x84, 0x19, 0x14, 0x3a, 0xb7, 0xe6, 0x79, 0x80, 0x3a,
+ 0xa3, 0x65, 0x29, 0x8d, 0x0b, 0xa6, 0xc4, 0x35, 0xaf, 0x30, 0x58, 0xa3, 0xb2, 0xff, 0x79, 0x01,
+ 0x06, 0x65, 0xb6, 0xbc, 0x96, 0xd7, 0x8d, 0xda, 0xef, 0x50, 0xe9, 0xb3, 0xd1, 0x65, 0x28, 0x31,
+ 0xbd, 0x74, 0x25, 0xd6, 0x96, 0x2a, 0xad, 0xd0, 0x8a, 0x44, 0xe0, 0x98, 0x86, 0xee, 0x8e, 0x61,
+ 0x6b, 0x9d, 0x91, 0x27, 0x1e, 0xda, 0x56, 0x39, 0x18, 0x4b, 0x3c, 0xfa, 0x18, 0x8c, 0xf1, 0x72,
+ 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x2d, 0xab, 0x57, 0x45, 0x2f, 0x19, 0x5b, 0x49, 0xe0, 0x0e, 0xf6,
+ 0xcb, 0x27, 0x92, 0x30, 0x66, 0xa4, 0x4d, 0x71, 0x61, 0x2e, 0x6b, 0xbc, 0x12, 0xba, 0xab, 0xa7,
+ 0x3c, 0xdd, 0x62, 0x14, 0xd6, 0xe9, 0xec, 0x4f, 0x03, 0x4a, 0xe7, 0x0d, 0x44, 0xaf, 0x73, 0x97,
+ 0x67, 0x37, 0x20, 0xf5, 0x76, 0x46, 0x5b, 0x3d, 0x46, 0x87, 0x7c, 0x23, 0xc7, 0x4b, 0x61, 0x55,
+ 0xde, 0xfe, 0x3f, 0x8b, 0x30, 0x96, 0x8c, 0x0a, 0x80, 0xae, 0x41, 0x1f, 0x17, 0x29, 0x05, 0xfb,
+ 0x36, 0x3e, 0x41, 0x5a, 0x2c, 0x01, 0x76, 0xb8, 0x0a, 0xa9, 0x54, 0x94, 0x47, 0x6f, 0xc1, 0x60,
+ 0xdd, 0xbf, 0xe3, 0xdd, 0x71, 0x82, 0xfa, 0x4c, 0x65, 0x49, 0x4c, 0xe7, 0x4c, 0x45, 0xc5, 0x7c,
+ 0x4c, 0xa6, 0xc7, 0x27, 0x60, 0xf6, 0xef, 0x18, 0x85, 0x75, 0x76, 0x68, 0x8d, 0x25, 0xfa, 0xd8,
+ 0x70, 0x37, 0x57, 0x9c, 0x66, 0xbb, 0xf7, 0x2f, 0x73, 0x92, 0x48, 0xe3, 0x3c, 0x2c, 0xb2, 0x81,
+ 0x70, 0x04, 0x8e, 0x19, 0xa1, 0xcf, 0xc2, 0x44, 0x98, 0x63, 0x3a, 0xc9, 0x4b, 0x23, 0xdb, 0xce,
+ 0x9a, 0x30, 0x7b, 0xfa, 0xfe, 0x7e, 0x79, 0x22, 0xcb, 0xc8, 0x92, 0x55, 0x8d, 0xfd, 0xa5, 0x13,
+ 0x60, 0x2c, 0x62, 0x23, 0xab, 0xb8, 0x75, 0x44, 0x59, 0xc5, 0x31, 0x0c, 0x90, 0x9d, 0x66, 0xb4,
+ 0x37, 0xef, 0x06, 0x62, 0x4c, 0x32, 0x79, 0x2e, 0x08, 0x9a, 0x34, 0x4f, 0x89, 0xc1, 0x8a, 0x4f,
+ 0x76, 0xea, 0xf7, 0xe2, 0x37, 0x31, 0xf5, 0x7b, 0xcf, 0x31, 0xa6, 0x7e, 0x5f, 0x85, 0xfe, 0x4d,
+ 0x37, 0xc2, 0xa4, 0xe9, 0x8b, 0xcb, 0x5c, 0xe6, 0x3c, 0xbc, 0xca, 0x49, 0xd2, 0x49, 0x86, 0x05,
+ 0x02, 0x4b, 0x26, 0xe8, 0x75, 0xb5, 0x02, 0xfb, 0xf2, 0x15, 0x2e, 0x69, 0xe7, 0x95, 0xcc, 0x35,
+ 0x28, 0x12, 0xbc, 0xf7, 0x3f, 0x68, 0x82, 0xf7, 0x45, 0x99, 0x96, 0x7d, 0x20, 0xff, 0xb1, 0x1a,
+ 0xcb, 0xba, 0xde, 0x21, 0x19, 0xfb, 0x2d, 0x3d, 0x95, 0x7d, 0x29, 0x7f, 0x27, 0x50, 0x59, 0xea,
+ 0xbb, 0x4c, 0x60, 0xff, 0x7d, 0x16, 0x9c, 0x4c, 0xa6, 0x9a, 0x65, 0x6f, 0x2a, 0x84, 0x9f, 0xc7,
+ 0x4b, 0xdd, 0xe4, 0xfe, 0x65, 0x05, 0x8c, 0x0a, 0x99, 0x8e, 0x34, 0x93, 0x0c, 0x67, 0x57, 0x47,
+ 0x3b, 0x3a, 0x58, 0xaf, 0x0b, 0x7f, 0x83, 0xc7, 0x72, 0x32, 0xe1, 0xb7, 0xc9, 0x7f, 0xbf, 0x96,
+ 0x91, 0x75, 0xfd, 0xf1, 0xbc, 0xac, 0xeb, 0x5d, 0xe7, 0x5a, 0x7f, 0x5d, 0xe5, 0xc0, 0x1f, 0xce,
+ 0x9f, 0x4a, 0x3c, 0xc3, 0x7d, 0xc7, 0xcc, 0xf7, 0xaf, 0xab, 0xcc, 0xf7, 0x6d, 0x22, 0x8b, 0xf3,
+ 0xbc, 0xf6, 0x1d, 0xf3, 0xdd, 0x6b, 0x39, 0xeb, 0x47, 0x8f, 0x26, 0x67, 0xbd, 0x71, 0xd4, 0xf0,
+ 0xb4, 0xe9, 0xcf, 0x74, 0x38, 0x6a, 0x0c, 0xbe, 0xed, 0x0f, 0x1b, 0x9e, 0x9f, 0x7f, 0xfc, 0x81,
+ 0xf2, 0xf3, 0xdf, 0xd2, 0xf3, 0xdd, 0xa3, 0x0e, 0x09, 0xdd, 0x29, 0x51, 0x97, 0x59, 0xee, 0x6f,
+ 0xe9, 0x07, 0xe0, 0x44, 0x3e, 0x5f, 0x75, 0xce, 0xa5, 0xf9, 0x66, 0x1e, 0x81, 0xa9, 0xec, 0xf9,
+ 0x27, 0x8e, 0x27, 0x7b, 0xfe, 0xc9, 0x23, 0xcf, 0x9e, 0x7f, 0xea, 0x18, 0xb2, 0xe7, 0x9f, 0x3e,
+ 0xc6, 0xec, 0xf9, 0xb7, 0x98, 0x73, 0x14, 0x0f, 0x00, 0x25, 0x22, 0xa1, 0x3f, 0x95, 0x13, 0x3f,
+ 0x2d, 0x1d, 0x25, 0x8a, 0x7f, 0x9c, 0x42, 0xe1, 0x98, 0x55, 0x46, 0x56, 0xfe, 0xc9, 0x87, 0x90,
+ 0x95, 0x7f, 0x35, 0xce, 0xca, 0x7f, 0x26, 0x7f, 0xa8, 0x33, 0x9e, 0xd3, 0xe4, 0xe4, 0xe2, 0xbf,
+ 0xa5, 0xe7, 0xd0, 0x7f, 0xa4, 0x8d, 0x15, 0x2c, 0x4b, 0xa1, 0xdc, 0x26, 0x73, 0xfe, 0x6b, 0x3c,
+ 0x73, 0xfe, 0xd9, 0xfc, 0x9d, 0x3c, 0x79, 0xdc, 0x19, 0xf9, 0xf2, 0x69, 0xbb, 0x54, 0xf0, 0x57,
+ 0x16, 0xf3, 0x3d, 0xa7, 0x5d, 0x2a, 0x7a, 0x6c, 0xba, 0x5d, 0x0a, 0x85, 0x63, 0x56, 0xf6, 0x0f,
+ 0x14, 0xe0, 0x7c, 0xfb, 0xf5, 0x16, 0x6b, 0xc9, 0x2b, 0xb1, 0x43, 0x40, 0x42, 0x4b, 0xce, 0xef,
+ 0x6c, 0x31, 0x55, 0xd7, 0xf1, 0x20, 0xaf, 0xc2, 0xb8, 0x7a, 0x87, 0xd3, 0x70, 0x6b, 0x7b, 0xab,
+ 0xf1, 0x35, 0x59, 0x45, 0x4e, 0xa8, 0x26, 0x09, 0x70, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x03, 0xb8,
+ 0x34, 0x2f, 0xee, 0x66, 0x71, 0x94, 0x71, 0x13, 0x8d, 0x93, 0xf4, 0xf6, 0xcf, 0x59, 0x70, 0x3a,
+ 0x27, 0xe5, 0x6b, 0xd7, 0xe1, 0x0e, 0x37, 0x60, 0xb4, 0x69, 0x16, 0xed, 0x10, 0xa1, 0xd5, 0x48,
+ 0x2c, 0xab, 0xda, 0x9a, 0x40, 0xe0, 0x24, 0x53, 0xfb, 0x67, 0x0a, 0x70, 0xae, 0xad, 0x63, 0x29,
+ 0xc2, 0x70, 0x6a, 0x73, 0x27, 0x74, 0xe6, 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x93,
+ 0xd4, 0x34, 0x3b, 0x07, 0xf3, 0xd0, 0xbc, 0xba, 0x52, 0x9d, 0x49, 0x53, 0xe0, 0x9c, 0x92, 0x68,
+ 0x11, 0x50, 0x1a, 0x23, 0x46, 0x98, 0x65, 0x0f, 0x48, 0xf3, 0xc3, 0x19, 0x25, 0xd0, 0x07, 0x61,
+ 0x58, 0x39, 0xac, 0x6a, 0x23, 0xce, 0x36, 0x76, 0xac, 0x23, 0xb0, 0x49, 0x87, 0xae, 0xf0, 0xf4,
+ 0x13, 0x22, 0x51, 0x89, 0x30, 0x8a, 0x8c, 0xca, 0xdc, 0x12, 0x02, 0x8c, 0x75, 0x9a, 0xd9, 0x97,
+ 0x7f, 0xeb, 0x1b, 0xe7, 0xdf, 0xf7, 0xbb, 0xdf, 0x38, 0xff, 0xbe, 0x3f, 0xf8, 0xc6, 0xf9, 0xf7,
+ 0x7d, 0xd7, 0xfd, 0xf3, 0xd6, 0x6f, 0xdd, 0x3f, 0x6f, 0xfd, 0xee, 0xfd, 0xf3, 0xd6, 0x1f, 0xdc,
+ 0x3f, 0x6f, 0xfd, 0xf1, 0xfd, 0xf3, 0xd6, 0x17, 0xff, 0xe4, 0xfc, 0xfb, 0xde, 0x44, 0x71, 0x00,
+ 0xd1, 0xcb, 0x74, 0x74, 0x2e, 0xef, 0x5e, 0xf9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x0b,
+ 0x0a, 0x3d, 0x91, 0x13, 0x01, 0x00,
}
func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
@@ -8752,6 +8820,15 @@ func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.RestartPolicy != nil {
+ i -= len(*m.RestartPolicy)
+ copy(dAtA[i:], *m.RestartPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RestartPolicy)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc2
+ }
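+	// The two literal bytes written just above are the protobuf key for the new
+	// RestartPolicy field: key = (fieldNumber<<3)|wireType = (24<<3)|2 = 0xc2, and
+	// since 0xc2 exceeds 0x7f it is varint-encoded as the two bytes 0xc2 0x01.
+	// Because MarshalToSizedBuffer fills the buffer backwards, they are emitted in
+	// reverse order (0x1 first, then 0xc2). A minimal sketch of that computation
+	// (a hypothetical helper, not part of the generated file):
+	//
+	//	func protoKey(fieldNumber, wireType uint64) []byte {
+	//		key := fieldNumber<<3 | wireType // 24<<3|2 = 0xc2 for restartPolicy
+	//		var out []byte
+	//		for key >= 0x80 {
+	//			out = append(out, byte(key)|0x80) // low 7 bits with continuation bit set
+	//			key >>= 7
+	//		}
+	//		return append(out, byte(key)) // yields [0xc2, 0x01]
+	//	}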
if len(m.ResizePolicy) > 0 {
for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -10105,6 +10182,15 @@ func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error
_ = i
var l int
_ = l
+ if m.RestartPolicy != nil {
+ i -= len(*m.RestartPolicy)
+ copy(dAtA[i:], *m.RestartPolicy)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RestartPolicy)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc2
+ }
if len(m.ResizePolicy) > 0 {
for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -11255,6 +11341,34 @@ func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *HostIP) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HostIP) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.IP)
+ copy(dAtA[i:], m.IP)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -13740,12 +13854,29 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er
_ = i
var l int
_ = l
- if m.ResizeStatus != nil {
- i -= len(*m.ResizeStatus)
- copy(dAtA[i:], *m.ResizeStatus)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResizeStatus)))
- i--
- dAtA[i] = 0x32
+ if len(m.AllocatedResourceStatuses) > 0 {
+ keysForAllocatedResourceStatuses := make([]string, 0, len(m.AllocatedResourceStatuses))
+ for k := range m.AllocatedResourceStatuses {
+ keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses)
+ for iNdEx := len(keysForAllocatedResourceStatuses) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.AllocatedResourceStatuses[ResourceName(keysForAllocatedResourceStatuses[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAllocatedResourceStatuses[iNdEx])
+ copy(dAtA[i:], keysForAllocatedResourceStatuses[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResourceStatuses[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
}
if len(m.AllocatedResources) > 0 {
keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources))
@@ -14404,6 +14535,18 @@ func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error)
_ = i
var l int
_ = l
+ if m.LastPhaseTransitionTime != nil {
+ {
+ size, err := m.LastPhaseTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
i -= len(m.Reason)
copy(dAtA[i:], m.Reason)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
@@ -15267,6 +15410,41 @@ func (m *PodResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *PodResourceClaimStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.ResourceClaimName != nil {
+ i -= len(*m.ResourceClaimName)
+ copy(dAtA[i:], *m.ResourceClaimName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func (m *PodSchedulingGate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -15936,6 +16114,36 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if len(m.HostIPs) > 0 {
+ for iNdEx := len(m.HostIPs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.HostIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ }
+ if len(m.ResourceClaimStatuses) > 0 {
+ for iNdEx := len(m.ResourceClaimStatuses) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ResourceClaimStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ }
i -= len(m.Resize)
copy(dAtA[i:], m.Resize)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resize)))
@@ -20970,6 +21178,10 @@ func (m *Container) Size() (n int) {
n += 2 + l + sovGenerated(uint64(l))
}
}
+ if m.RestartPolicy != nil {
+ l = len(*m.RestartPolicy)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -21469,6 +21681,10 @@ func (m *EphemeralContainerCommon) Size() (n int) {
n += 2 + l + sovGenerated(uint64(l))
}
}
+ if m.RestartPolicy != nil {
+ l = len(*m.RestartPolicy)
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -21805,6 +22021,17 @@ func (m *HostAlias) Size() (n int) {
return n
}
+func (m *HostIP) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.IP)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func (m *HostPathVolumeSource) Size() (n int) {
if m == nil {
return 0
@@ -22744,9 +22971,13 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) {
n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
}
}
- if m.ResizeStatus != nil {
- l = len(*m.ResizeStatus)
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.AllocatedResourceStatuses) > 0 {
+ for k, v := range m.AllocatedResourceStatuses {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
}
return n
}
@@ -22950,6 +23181,10 @@ func (m *PersistentVolumeStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.Reason)
n += 1 + l + sovGenerated(uint64(l))
+ if m.LastPhaseTransitionTime != nil {
+ l = m.LastPhaseTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -23263,6 +23498,21 @@ func (m *PodResourceClaim) Size() (n int) {
return n
}
+func (m *PodResourceClaimStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.ResourceClaimName != nil {
+ l = len(*m.ResourceClaimName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
func (m *PodSchedulingGate) Size() (n int) {
if m == nil {
return 0
@@ -23552,6 +23802,18 @@ func (m *PodStatus) Size() (n int) {
}
l = len(m.Resize)
n += 1 + l + sovGenerated(uint64(l))
+ if len(m.ResourceClaimStatuses) > 0 {
+ for _, e := range m.ResourceClaimStatuses {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.HostIPs) > 0 {
+ for _, e := range m.HostIPs {
+ l = e.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -25585,6 +25847,7 @@ func (this *Container) String() string {
`VolumeDevices:` + repeatedStringForVolumeDevices + `,`,
`StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`,
`ResizePolicy:` + repeatedStringForResizePolicy + `,`,
+ `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`,
`}`,
}, "")
return s
@@ -25960,6 +26223,7 @@ func (this *EphemeralContainerCommon) String() string {
`VolumeDevices:` + repeatedStringForVolumeDevices + `,`,
`StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`,
`ResizePolicy:` + repeatedStringForResizePolicy + `,`,
+ `RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`,
`}`,
}, "")
return s
@@ -26221,6 +26485,16 @@ func (this *HostAlias) String() string {
}, "")
return s
}
+func (this *HostIP) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HostIP{`,
+ `IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *HostPathVolumeSource) String() string {
if this == nil {
return "nil"
@@ -26972,13 +27246,23 @@ func (this *PersistentVolumeClaimStatus) String() string {
mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)])
}
mapStringForAllocatedResources += "}"
+ keysForAllocatedResourceStatuses := make([]string, 0, len(this.AllocatedResourceStatuses))
+ for k := range this.AllocatedResourceStatuses {
+ keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses)
+ mapStringForAllocatedResourceStatuses := "map[ResourceName]ClaimResourceStatus{"
+ for _, k := range keysForAllocatedResourceStatuses {
+ mapStringForAllocatedResourceStatuses += fmt.Sprintf("%v: %v,", k, this.AllocatedResourceStatuses[ResourceName(k)])
+ }
+ mapStringForAllocatedResourceStatuses += "}"
s := strings.Join([]string{`&PersistentVolumeClaimStatus{`,
`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
`AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`,
`Capacity:` + mapStringForCapacity + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
`AllocatedResources:` + mapStringForAllocatedResources + `,`,
- `ResizeStatus:` + valueToStringGenerated(this.ResizeStatus) + `,`,
+ `AllocatedResourceStatuses:` + mapStringForAllocatedResourceStatuses + `,`,
`}`,
}, "")
return s
@@ -27088,6 +27372,7 @@ func (this *PersistentVolumeStatus) String() string {
`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+ `LastPhaseTransitionTime:` + strings.Replace(fmt.Sprintf("%v", this.LastPhaseTransitionTime), "Time", "v1.Time", 1) + `,`,
`}`,
}, "")
return s
@@ -27337,6 +27622,17 @@ func (this *PodResourceClaim) String() string {
}, "")
return s
}
+func (this *PodResourceClaimStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PodResourceClaimStatus{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *PodSchedulingGate) String() string {
if this == nil {
return "nil"
@@ -27533,6 +27829,16 @@ func (this *PodStatus) String() string {
repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + ","
}
repeatedStringForEphemeralContainerStatuses += "}"
+ repeatedStringForResourceClaimStatuses := "[]PodResourceClaimStatus{"
+ for _, f := range this.ResourceClaimStatuses {
+ repeatedStringForResourceClaimStatuses += strings.Replace(strings.Replace(f.String(), "PodResourceClaimStatus", "PodResourceClaimStatus", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForResourceClaimStatuses += "}"
+ repeatedStringForHostIPs := "[]HostIP{"
+ for _, f := range this.HostIPs {
+ repeatedStringForHostIPs += strings.Replace(strings.Replace(f.String(), "HostIP", "HostIP", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForHostIPs += "}"
s := strings.Join([]string{`&PodStatus{`,
`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
@@ -27548,6 +27854,8 @@ func (this *PodStatus) String() string {
`PodIPs:` + repeatedStringForPodIPs + `,`,
`EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`,
`Resize:` + fmt.Sprintf("%v", this.Resize) + `,`,
+ `ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`,
+ `HostIPs:` + repeatedStringForHostIPs + `,`,
`}`,
}, "")
return s
@@ -34125,6 +34433,39 @@ func (m *Container) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 24:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
+ m.RestartPolicy = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -38278,6 +38619,39 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 24:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
+ m.RestartPolicy = &s
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -41368,6 +41742,88 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *HostIP) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HostIP: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IP = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -49625,11 +50081,140 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.AllocatedResources == nil {
- m.AllocatedResources = make(ResourceList)
+ if m.AllocatedResources == nil {
+ m.AllocatedResources = make(ResourceList)
+ }
+ var mapkey ResourceName
+ mapvalue := &resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.AllocatedResources[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourceStatuses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AllocatedResourceStatuses == nil {
+ m.AllocatedResourceStatuses = make(map[ResourceName]ClaimResourceStatus)
}
var mapkey ResourceName
- mapvalue := &resource.Quantity{}
+ var mapvalue ClaimResourceStatus
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
@@ -49678,7 +50263,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error {
mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
- var mapmsglen int
+ var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -49688,26 +50273,24 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- mapmsglen |= int(b&0x7F) << shift
+ stringLenmapvalue |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if mapmsglen < 0 {
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
return ErrInvalidLengthGenerated
}
- if postmsgIndex > l {
+ if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
- mapvalue = &resource.Quantity{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
+ mapvalue = ClaimResourceStatus(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -49723,40 +50306,7 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error {
iNdEx += skippy
}
}
- m.AllocatedResources[ResourceName(mapkey)] = *mapvalue
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResizeStatus", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := PersistentVolumeClaimResizeStatus(dAtA[iNdEx:postIndex])
- m.ResizeStatus = &s
+ m.AllocatedResourceStatuses[ResourceName(mapkey)] = ((ClaimResourceStatus)(mapvalue))
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -51526,6 +52076,42 @@ func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error {
}
m.Reason = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastPhaseTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastPhaseTransitionTime == nil {
+ m.LastPhaseTransitionTime = &v1.Time{}
+ }
+ if err := m.LastPhaseTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -54039,6 +54625,121 @@ func (m *PodResourceClaim) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *PodResourceClaimStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodResourceClaimStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.ResourceClaimName = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *PodSchedulingGate) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -56483,6 +57184,74 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
}
m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimStatuses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceClaimStatuses = append(m.ResourceClaimStatuses, PodResourceClaimStatus{})
+ if err := m.ResourceClaimStatuses[len(m.ResourceClaimStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostIPs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HostIPs = append(m.HostIPs, HostIP{})
+ if err := m.HostIPs[len(m.HostIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
index 8ef67ca40b..901e837313 100644
--- a/vendor/k8s.io/api/core/v1/generated.proto
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -414,15 +414,9 @@ message ClaimSource {
//
// The template will be used to create a new ResourceClaim, which will
// be bound to this pod. When this pod is deleted, the ResourceClaim
-  // will also be deleted. The name of the ResourceClaim will be <pod name>-<resource name>, where <resource name> is the
- // PodResourceClaim.Name. Pod validation will reject the pod if the
- // concatenated name is not valid for a ResourceClaim (e.g. too long).
- //
- // An existing ResourceClaim with that name that is not owned by the
- // pod will not be used for the pod to avoid using an unrelated
- // resource by mistake. Scheduling and pod startup are then blocked
- // until the unrelated ResourceClaim is removed.
+ // will also be deleted. The pod name and resource name, along with a
+ // generated component, will be used to form a unique name for the
+ // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
//
// This field is immutable and no changes will be made to the
// corresponding ResourceClaim by the control plane after creating the
@@ -729,6 +723,25 @@ message Container {
// +listType=atomic
repeated ContainerResizePolicy resizePolicy = 23;
+ // RestartPolicy defines the restart behavior of individual containers in a pod.
+ // This field may only be set for init containers, and the only allowed value is "Always".
+ // For non-init containers or when this field is not specified,
+ // the restart behavior is defined by the Pod's restart policy and the container type.
+ // Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ // this init container will be continually restarted on
+ // exit until all regular containers have terminated. Once all regular
+ // containers have completed, all init containers with restartPolicy "Always"
+ // will be shut down. This lifecycle differs from normal init containers and
+ // is often referred to as a "sidecar" container. Although this init
+ // container still starts in the init container sequence, it does not wait
+ // for the container to complete before proceeding to the next init
+ // container. Instead, the next init container starts immediately after this
+ // init container is started, or after any startupProbe has successfully
+ // completed.
+ // +featureGate=SidecarContainers
+ // +optional
+ optional string restartPolicy = 24;
+
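+  // As an illustration of the sidecar pattern described above, an init container
+  // using this field could be built with the k8s.io/api/core/v1 Go types roughly as
+  // follows (a minimal sketch; the container name and image are hypothetical):
+  //
+  //	import corev1 "k8s.io/api/core/v1"
+  //
+  //	// sidecarInitContainer builds an init container that keeps running for the
+  //	// lifetime of the pod because its restartPolicy is "Always".
+  //	func sidecarInitContainer() corev1.Container {
+  //		always := corev1.ContainerRestartPolicyAlways // "Always" is the only allowed value
+  //		return corev1.Container{
+  //			Name:          "log-shipper",                    // hypothetical name
+  //			Image:         "example.com/log-shipper:latest", // hypothetical image
+  //			RestartPolicy: &always,
+  //		}
+  //	}
+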
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
// +optional
@@ -1147,6 +1160,8 @@ message EndpointPort {
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
// * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
@@ -1386,6 +1401,14 @@ message EphemeralContainerCommon {
// +listType=atomic
repeated ContainerResizePolicy resizePolicy = 23;
+ // Restart policy for the container to manage the restart behavior of each
+ // container within a pod.
+ // This may only be set for init containers. You cannot set this field on
+ // ephemeral containers.
+ // +featureGate=SidecarContainers
+ // +optional
+ optional string restartPolicy = 24;
+
// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
// Cannot be updated.
// +optional
@@ -1871,6 +1894,12 @@ message HostAlias {
repeated string hostnames = 2;
}
+// HostIP represents a single IP address allocated to the host.
+message HostIP {
+ // IP is the IP address assigned to the host
+ optional string ip = 1;
+}
+
// Represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
message HostPathVolumeSource {
@@ -2863,25 +2892,71 @@ message PersistentVolumeClaimStatus {
// +patchStrategy=merge
repeated PersistentVolumeClaimCondition conditions = 4;
- // allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may
- // be larger than the actual capacity when a volume expansion operation is requested.
+ // allocatedResources tracks the resources allocated to a PVC including its capacity.
+ // Key names follow standard Kubernetes label syntax. Valid values are either:
+ // * Un-prefixed keys:
+ // - storage - the capacity of the volume.
+ // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+ // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
+ // reserved and hence may not be used.
+ //
+ // Capacity reported here may be larger than the actual capacity when a volume expansion operation
+ // is requested.
// For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
// If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
// If a volume expansion capacity request is lowered, allocatedResources is only
// lowered if there are no expansion operations in progress and if the actual volume capacity
// is equal or lower than the requested capacity.
+ //
+  // A controller that receives a PVC update with a previously unknown resourceName
+  // should ignore the update for the purpose it was designed for. For example - a controller that
+  // is only responsible for resizing capacity of the volume should ignore PVC updates that change other valid
+  // resources associated with the PVC.
+ //
// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
// +featureGate=RecoverVolumeExpansionFailure
// +optional
  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 5;
- // resizeStatus stores status of resize operation.
- // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty
- // string by resize controller or kubelet.
+ // allocatedResourceStatuses stores status of resource being resized for the given PVC.
+ // Key names follow standard Kubernetes label syntax. Valid values are either:
+ // * Un-prefixed keys:
+ // - storage - the capacity of the volume.
+ // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+ // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
+ // reserved and hence may not be used.
+ //
+ // ClaimResourceStatus can be in any of following states:
+ // - ControllerResizeInProgress:
+ // State set when resize controller starts resizing the volume in control-plane.
+ // - ControllerResizeFailed:
+ // State set when resize has failed in resize controller with a terminal error.
+ // - NodeResizePending:
+ // State set when resize controller has finished resizing the volume but further resizing of
+ // volume is needed on the node.
+ // - NodeResizeInProgress:
+ // State set when kubelet starts resizing the volume.
+ // - NodeResizeFailed:
+ // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set
+ // NodeResizeFailed.
+ // For example: if expanding a PVC for more capacity - this field can be one of the following states:
+ // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress"
+ // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed"
+ // When this field is not set, it means that no resize operation is in progress for the given PVC.
+ //
+  // A controller that receives a PVC update with a previously unknown resourceName or ClaimResourceStatus
+  // should ignore the update for the purpose it was designed for. For example - a controller that
+  // is only responsible for resizing capacity of the volume should ignore PVC updates that change other valid
+  // resources associated with the PVC.
+ //
// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
// +featureGate=RecoverVolumeExpansionFailure
+ // +mapType=granular
// +optional
- optional string resizeStatus = 6;
+  map<string, string> allocatedResourceStatuses = 7;
}
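 
  // As a rough sketch of how a consumer might read the new map (using the
  // k8s.io/api/core/v1 Go types and the status strings listed in the comment above),
  // a controller could check whether a storage resize is still outstanding:
  //
  //	import corev1 "k8s.io/api/core/v1"
  //
  //	// storageResizeOutstanding reports whether the PVC records an unfinished resize
  //	// for the "storage" resource; an absent key means no resize is in progress.
  //	func storageResizeOutstanding(pvc *corev1.PersistentVolumeClaim) bool {
  //		status, ok := pvc.Status.AllocatedResourceStatuses[corev1.ResourceStorage]
  //		if !ok {
  //			return false
  //		}
  //		// Terminal failures are not "in progress"; every other recorded state still needs work.
  //		return status != corev1.ClaimResourceStatus("ControllerResizeFailed") &&
  //			status != corev1.ClaimResourceStatus("NodeResizeFailed")
  //	}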
// PersistentVolumeClaimTemplate is used to produce
@@ -3103,6 +3178,13 @@ message PersistentVolumeStatus {
// for machine parsing and tidy display in the CLI.
// +optional
optional string reason = 3;
+
+ // lastPhaseTransitionTime is the time the phase transitioned from one to another
+  // and automatically resets to current time every time a volume phase transitions.
+  // This is an alpha field and requires enabling the PersistentVolumeLastPhaseTransitionTime feature.
+ // +featureGate=PersistentVolumeLastPhaseTransitionTime
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4;
}
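 
 // For example, a monitoring sketch against the k8s.io/api/core/v1 Go types (assuming
 // the PersistentVolumeLastPhaseTransitionTime gate is enabled so the field is populated)
 // could compute how long a PV has been in its current phase:
 //
 //	import (
 //		"time"
 //
 //		corev1 "k8s.io/api/core/v1"
 //	)
 //
 //	// timeInCurrentPhase returns how long the PV has been in its current phase,
 //	// or zero if no transition time has been recorded yet.
 //	func timeInCurrentPhase(pv *corev1.PersistentVolume, now time.Time) time.Duration {
 //		if pv.Status.LastPhaseTransitionTime == nil {
 //			return 0
 //		}
 //		return now.Sub(pv.Status.LastPhaseTransitionTime.Time)
 //	}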
// Represents a Photon Controller persistent disk resource.
@@ -3347,12 +3429,9 @@ message PodExecOptions {
repeated string command = 6;
}
-// IP address information for entries in the (plural) PodIPs field.
-// Each entry includes:
-//
-// IP: An IP address allocated to the pod. Routable at least within the cluster.
+// PodIP represents a single IP address allocated to the pod.
message PodIP {
- // ip is an IP address (IPv4 or IPv6) assigned to the pod
+ // IP is the IP address assigned to the pod
optional string ip = 1;
}
@@ -3469,6 +3548,24 @@ message PodResourceClaim {
optional ClaimSource source = 2;
}
+// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
+// which references a ResourceClaimTemplate. It stores the generated name for
+// the corresponding ResourceClaim.
+message PodResourceClaimStatus {
+ // Name uniquely identifies this resource claim inside the pod.
+ // This must match the name of an entry in pod.spec.resourceClaims,
+ // which implies that the string must be a DNS_LABEL.
+ optional string name = 1;
+
+ // ResourceClaimName is the name of the ResourceClaim that was
+ // generated for the Pod in the namespace of the Pod. If this is
+ // unset, then generating a ResourceClaim was not necessary. The
+ // pod.spec.resourceClaims entry can be ignored in this case.
+ //
+ // +optional
+ optional string resourceClaimName = 2;
+}
+
// PodSchedulingGate is associated to a Pod to guard its scheduling.
message PodSchedulingGate {
// Name of the scheduling gate.
@@ -3960,11 +4057,23 @@ message PodStatus {
// +optional
optional string nominatedNodeName = 11;
- // IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+ // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
+ // A pod can be assigned to a node that has a problem in kubelet which in turn means that HostIP will
+ // not be updated even if a node is assigned to the pod
// +optional
optional string hostIP = 5;
- // IP address allocated to the pod. Routable at least within the cluster.
+ // hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must
+ // match the hostIP field. This list is empty if the pod has not started yet.
+ // A pod can be assigned to a node that has a problem in kubelet which in turn means that HostIPs will
+ // not be updated even if a node is assigned to this pod.
+ // +optional
+ // +patchStrategy=merge
+ // +patchMergeKey=ip
+ // +listType=atomic
+ repeated HostIP hostIPs = 16;
+
+ // podIP address allocated to the pod. Routable at least within the cluster.
// Empty if not yet allocated.
// +optional
optional string podIP = 6;
@@ -4009,6 +4118,15 @@ message PodStatus {
// +featureGate=InPlacePodVerticalScaling
// +optional
optional string resize = 14;
+
+ // Status of resource claims.
+ // +patchMergeKey=name
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=name
+ // +featureGate=DynamicResourceAllocation
+ // +optional
+ repeated PodResourceClaimStatus resourceClaimStatuses = 15;
}
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
@@ -4753,7 +4871,7 @@ message SeccompProfile {
// localhostProfile indicates a profile defined in a file on the node should be used.
// The profile must be preconfigured on the node to work.
// Must be a descending path, relative to the kubelet's configured seccomp profile location.
- // Must only be set if type is "Localhost".
+ // Must be set if type is "Localhost". Must NOT be set for any other type.
// +optional
optional string localhostProfile = 2;
}
@@ -5124,10 +5242,19 @@ message ServicePort {
optional string protocol = 2;
// The application protocol for this port.
+ // This is used as a hint for implementations to offer richer behavior for protocols that they understand.
// This field follows standard Kubernetes label syntax.
- // Un-prefixed names are reserved for IANA standard service names (as per
+ // Valid values are either:
+ //
+ // * Un-prefixed protocol names - reserved for IANA standard service names (as per
// RFC-6335 and https://www.iana.org/assignments/service-names).
- // Non-standard protocols should use prefixed names such as
+ //
+ // * Kubernetes-defined prefixed names:
+ // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+ //
+ // * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
// +optional
optional string appProtocol = 6;
@@ -5275,10 +5402,9 @@ message ServiceSpec {
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
- // Deprecated: This field was under-specified and its meaning varies across implementations,
- // and it cannot support dual-stack.
- // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.
- // This field may be removed in a future API version.
+ // Deprecated: This field was under-specified and its meaning varies across implementations.
+ // Using it is non-portable and it may not support dual-stack.
+ // Users are encouraged to use implementation-specific annotations when available.
// +optional
optional string loadBalancerIP = 8;
@@ -6053,12 +6179,9 @@ message WindowsSecurityContextOptions {
optional string runAsUserName = 3;
// HostProcess determines if a container should be run as a 'Host Process' container.
- // This field is alpha-level and will only be honored by components that enable the
- // WindowsHostProcessContainers feature flag. Setting this field without the feature
- // flag will result in errors when validating the Pod. All of a Pod's containers must
- // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
- // containers and non-HostProcess containers). In addition, if HostProcess is true
- // then HostNetwork must also be set to true.
+ // All of a Pod's containers must have the same effective HostProcess value
+ // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ // In addition, if HostProcess is true then HostNetwork must also be set to true.
// +optional
optional bool hostProcess = 4;
}
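
As a reading aid for the allocatedResourceStatuses field documented above, the following is a minimal Go sketch (illustrative only, not part of the vendored change; the package and helper name are invented) of how a consumer could tell whether a storage resize is still in flight using the constants introduced in this update:

    package example // illustrative helper, not part of this patch

    import corev1 "k8s.io/api/core/v1"

    // storageResizeInProgress reports whether the PVC still has a pending
    // resize for its "storage" resource, based on the new
    // status.allocatedResourceStatuses map. An absent key means no resize
    // is in progress for that resource.
    func storageResizeInProgress(pvc *corev1.PersistentVolumeClaim) bool {
        switch pvc.Status.AllocatedResourceStatuses[corev1.ResourceStorage] {
        case corev1.PersistentVolumeClaimControllerResizeInProgress,
            corev1.PersistentVolumeClaimNodeResizePending,
            corev1.PersistentVolumeClaimNodeResizeInProgress:
            return true
        default:
            // Covers the terminal failure states, unrecognized statuses, and
            // the missing-key case (which yields the empty string).
            return false
        }
    }
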
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index c831d5961c..9e05c22356 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -411,6 +411,12 @@ type PersistentVolumeStatus struct {
// for machine parsing and tidy display in the CLI.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+ // lastPhaseTransitionTime is the time the phase transitioned from one to another
+ // and automatically resets to current time every time a volume phase transitions.
+ // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.
+ // +featureGate=PersistentVolumeLastPhaseTransitionTime
+ // +optional
+ LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -558,23 +564,27 @@ const (
)
// +enum
-type PersistentVolumeClaimResizeStatus string
+// When a controller receives persistentvolume claim update with ClaimResourceStatus for a resource
+// that it does not recognize, then it should ignore that update and let other controllers
+// handle it.
+type ClaimResourceStatus string
const (
- // When expansion is complete, the empty string is set by resize controller or kubelet.
- PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = ""
- // State set when resize controller starts expanding the volume in control-plane
- PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress"
- // State set when expansion has failed in resize controller with a terminal error.
- // Transient errors such as timeout should not set this status and should leave ResizeStatus
+ // State set when resize controller starts resizing the volume in control-plane.
+ PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
+
+ // State set when resize has failed in resize controller with a terminal error.
+ // Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
// unmodified, so as resize controller can resume the volume expansion.
- PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed"
- // State set when resize controller has finished expanding the volume but further expansion is needed on the node.
- PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending"
- // State set when kubelet starts expanding the volume.
- PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress"
- // State set when expansion has failed in kubelet with a terminal error. Transient errors don't set NodeExpansionFailed.
- PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed"
+ PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed"
+
+ // State set when resize controller has finished resizing the volume but further resizing of volume
+ // is needed on the node.
+ PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
+ // State set when kubelet starts resizing the volume.
+ PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
+ // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed.
+ PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
)
// PersistentVolumeClaimCondition contains details about state of pvc
@@ -615,24 +625,74 @@ type PersistentVolumeClaimStatus struct {
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
- // allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may
- // be larger than the actual capacity when a volume expansion operation is requested.
+ // allocatedResources tracks the resources allocated to a PVC including its capacity.
+ // Key names follow standard Kubernetes label syntax. Valid values are either:
+ // * Un-prefixed keys:
+ // - storage - the capacity of the volume.
+ // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+ // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
+ // reserved and hence may not be used.
+ //
+ // Capacity reported here may be larger than the actual capacity when a volume expansion operation
+ // is requested.
// For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
// If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
// If a volume expansion capacity request is lowered, allocatedResources is only
// lowered if there are no expansion operations in progress and if the actual volume capacity
// is equal or lower than the requested capacity.
+ //
+ // A controller that receives PVC update with previously unknown resourceName
+ // should ignore the update for the purpose it was designed. For example - a controller that
+ // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
+ // resources associated with PVC.
+ //
// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
// +featureGate=RecoverVolumeExpansionFailure
// +optional
AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
- // resizeStatus stores status of resize operation.
- // ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty
- // string by resize controller or kubelet.
+
+ // resizeStatus is tombstoned since the field was replaced by allocatedResourceStatuses
+ // ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"`
+
+ // allocatedResourceStatuses stores status of resource being resized for the given PVC.
+ // Key names follow standard Kubernetes label syntax. Valid values are either:
+ // * Un-prefixed keys:
+ // - storage - the capacity of the volume.
+ // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+ // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
+ // reserved and hence may not be used.
+ //
+ // ClaimResourceStatus can be in any of following states:
+ // - ControllerResizeInProgress:
+ // State set when resize controller starts resizing the volume in control-plane.
+ // - ControllerResizeFailed:
+ // State set when resize has failed in resize controller with a terminal error.
+ // - NodeResizePending:
+ // State set when resize controller has finished resizing the volume but further resizing of
+ // volume is needed on the node.
+ // - NodeResizeInProgress:
+ // State set when kubelet starts resizing the volume.
+ // - NodeResizeFailed:
+ // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set
+ // NodeResizeFailed.
+ // For example: if expanding a PVC for more capacity - this field can be one of the following states:
+ // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress"
+ // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed"
+ // When this field is not set, it means that no resize operation is in progress for the given PVC.
+ //
+ // A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus
+ // should ignore the update for the purpose it was designed. For example - a controller that
+ // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
+ // resources associated with PVC.
+ //
// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
// +featureGate=RecoverVolumeExpansionFailure
+ // +mapType=granular
// +optional
- ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"`
+ AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
}
// +enum
@@ -2446,6 +2506,24 @@ type Container struct {
// +optional
// +listType=atomic
ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
+ // RestartPolicy defines the restart behavior of individual containers in a pod.
+ // This field may only be set for init containers, and the only allowed value is "Always".
+ // For non-init containers or when this field is not specified,
+ // the restart behavior is defined by the Pod's restart policy and the container type.
+ // Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ // this init container will be continually restarted on
+ // exit until all regular containers have terminated. Once all regular
+ // containers have completed, all init containers with restartPolicy "Always"
+ // will be shut down. This lifecycle differs from normal init containers and
+ // is often referred to as a "sidecar" container. Although this init
+ // container still starts in the init container sequence, it does not wait
+ // for the container to complete before proceeding to the next init
+ // container. Instead, the next init container starts immediately after this
+ // init container is started, or after any startupProbe has successfully
+ // completed.
+ // +featureGate=SidecarContainers
+ // +optional
+ RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
// +optional
@@ -2842,6 +2920,14 @@ const (
RestartPolicyNever RestartPolicy = "Never"
)
+// ContainerRestartPolicy is the restart policy for a single container.
+// This may only be set for init containers and only allowed value is "Always".
+type ContainerRestartPolicy string
+
+const (
+ ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"
+)
+
// DNSPolicy defines how a pod's DNS will be configured.
// +enum
type DNSPolicy string
@@ -3524,15 +3610,9 @@ type ClaimSource struct {
//
// The template will be used to create a new ResourceClaim, which will
// be bound to this pod. When this pod is deleted, the ResourceClaim
- // will also be deleted. The name of the ResourceClaim will be <pod name>-<resource name>, where <resource name> is the
- // PodResourceClaim.Name. Pod validation will reject the pod if the
- // concatenated name is not valid for a ResourceClaim (e.g. too long).
- //
- // An existing ResourceClaim with that name that is not owned by the
- // pod will not be used for the pod to avoid using an unrelated
- // resource by mistake. Scheduling and pod startup are then blocked
- // until the unrelated ResourceClaim is removed.
+ // will also be deleted. The pod name and resource name, along with a
+ // generated component, will be used to form a unique name for the
+ // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
//
// This field is immutable and no changes will be made to the
// corresponding ResourceClaim by the control plane after creating the
@@ -3540,6 +3620,24 @@ type ClaimSource struct {
ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimTemplateName"`
}
+// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
+// which references a ResourceClaimTemplate. It stores the generated name for
+// the corresponding ResourceClaim.
+type PodResourceClaimStatus struct {
+ // Name uniquely identifies this resource claim inside the pod.
+ // This must match the name of an entry in pod.spec.resourceClaims,
+ // which implies that the string must be a DNS_LABEL.
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+
+ // ResourceClaimName is the name of the ResourceClaim that was
+ // generated for the Pod in the namespace of the Pod. If this is
+ // unset, then generating a ResourceClaim was not necessary. The
+ // pod.spec.resourceClaims entry can be ignored in this case.
+ //
+ // +optional
+ ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"`
+}
+
// OSName is the set of OS'es that can be used in OS.
type OSName string
@@ -3838,7 +3936,7 @@ type SeccompProfile struct {
// localhostProfile indicates a profile defined in a file on the node should be used.
// The profile must be preconfigured on the node to work.
// Must be a descending path, relative to the kubelet's configured seccomp profile location.
- // Must only be set if type is "Localhost".
+ // Must be set if type is "Localhost". Must NOT be set for any other type.
// +optional
LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"`
}
@@ -3899,12 +3997,15 @@ type PodDNSConfigOption struct {
Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
-// IP address information for entries in the (plural) PodIPs field.
-// Each entry includes:
-//
-// IP: An IP address allocated to the pod. Routable at least within the cluster.
+// PodIP represents a single IP address allocated to the pod.
type PodIP struct {
- // ip is an IP address (IPv4 or IPv6) assigned to the pod
+ // IP is the IP address assigned to the pod
+ IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
+}
+
+// HostIP represents a single IP address allocated to the host.
+type HostIP struct {
+ // IP is the IP address assigned to the host
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
}
@@ -3976,6 +4077,13 @@ type EphemeralContainerCommon struct {
// +optional
// +listType=atomic
ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
+ // Restart policy for the container to manage the restart behavior of each
+ // container within a pod.
+ // This may only be set for init containers. You cannot set this field on
+ // ephemeral containers.
+ // +featureGate=SidecarContainers
+ // +optional
+ RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
// Cannot be updated.
// +optional
@@ -4128,10 +4236,23 @@ type PodStatus struct {
// +optional
NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"`
- // IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+ // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
+ // A pod can be assigned to a node that has a problem in kubelet which in turn means that HostIP will
+ // not be updated even if a node is assigned to the pod
// +optional
HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
- // IP address allocated to the pod. Routable at least within the cluster.
+
+ // hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must
+ // match the hostIP field. This list is empty if the pod has not started yet.
+ // A pod can be assigned to a node that has a problem in kubelet which in turn means that HostIPs will
+ // not be updated even if a node is assigned to this pod.
+ // +optional
+ // +patchStrategy=merge
+ // +patchMergeKey=ip
+ // +listType=atomic
+ HostIPs []HostIP `json:"hostIPs,omitempty" protobuf:"bytes,16,rep,name=hostIPs" patchStrategy:"merge" patchMergeKey:"ip"`
+
+ // podIP address allocated to the pod. Routable at least within the cluster.
// Empty if not yet allocated.
// +optional
PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
@@ -4174,6 +4295,15 @@ type PodStatus struct {
// +featureGate=InPlacePodVerticalScaling
// +optional
Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"`
+
+ // Status of resource claims.
+ // +patchMergeKey=name
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=name
+ // +featureGate=DynamicResourceAllocation
+ // +optional
+ ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
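
To make the relationship between the new hostIPs list and the existing hostIP field concrete, here is a small hedged sketch (illustrative only, not part of the vendored change; the helper name is invented) that prefers the plural field and falls back to the singular one:

    package example // illustrative helper, not part of this patch

    import corev1 "k8s.io/api/core/v1"

    // primaryHostIP returns the first entry of status.hostIPs when present;
    // per the field documentation, that entry must match status.hostIP.
    func primaryHostIP(pod *corev1.Pod) string {
        if len(pod.Status.HostIPs) > 0 {
            return pod.Status.HostIPs[0].IP
        }
        return pod.Status.HostIP // fall back to the singular field
    }
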
@@ -4714,10 +4844,9 @@ type ServiceSpec struct {
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
- // Deprecated: This field was under-specified and its meaning varies across implementations,
- // and it cannot support dual-stack.
- // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.
- // This field may be removed in a future API version.
+ // Deprecated: This field was under-specified and its meaning varies across implementations.
+ // Using it is non-portable and it may not support dual-stack.
+ // Users are encouraged to use implementation-specific annotations when available.
// +optional
LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
@@ -4866,10 +4995,19 @@ type ServicePort struct {
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
// The application protocol for this port.
+ // This is used as a hint for implementations to offer richer behavior for protocols that they understand.
// This field follows standard Kubernetes label syntax.
- // Un-prefixed names are reserved for IANA standard service names (as per
+ // Valid values are either:
+ //
+ // * Un-prefixed protocol names - reserved for IANA standard service names (as per
// RFC-6335 and https://www.iana.org/assignments/service-names).
- // Non-standard protocols should use prefixed names such as
+ //
+ // * Kubernetes-defined prefixed names:
+ // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+ //
+ // * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
// +optional
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"`
@@ -5110,6 +5248,8 @@ type EndpointPort struct {
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
// * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
@@ -6802,12 +6942,9 @@ type WindowsSecurityContextOptions struct {
RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`
// HostProcess determines if a container should be run as a 'Host Process' container.
- // This field is alpha-level and will only be honored by components that enable the
- // WindowsHostProcessContainers feature flag. Setting this field without the feature
- // flag will result in errors when validating the Pod. All of a Pod's containers must
- // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
- // containers and non-HostProcess containers). In addition, if HostProcess is true
- // then HostNetwork must also be set to true.
+ // All of a Pod's containers must have the same effective HostProcess value
+ // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ // In addition, if HostProcess is true then HostNetwork must also be set to true.
// +optional
HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"`
}
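
The container-level RestartPolicy added above enables the "sidecar" pattern for init containers. A hedged sketch of a pod spec using it follows (illustrative only, not part of the vendored change; names and images are placeholders, and the SidecarContainers feature gate is assumed to be enabled):

    package example // illustrative only, not part of this patch

    import corev1 "k8s.io/api/core/v1"

    // sidecarPodSpec declares an init container that keeps running alongside
    // the regular containers by setting the new RestartPolicy to "Always".
    func sidecarPodSpec() corev1.PodSpec {
        always := corev1.ContainerRestartPolicyAlways
        return corev1.PodSpec{
            InitContainers: []corev1.Container{{
                Name:          "log-shipper",         // placeholder name
                Image:         "example.com/shipper", // placeholder image
                RestartPolicy: &always,               // restarted on exit until regular containers finish
            }},
            Containers: []corev1.Container{{
                Name:  "app",             // placeholder
                Image: "example.com/app", // placeholder
            }},
        }
    }
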
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index a01ae37173..9734d8b41e 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -212,7 +212,7 @@ func (CinderVolumeSource) SwaggerDoc() map[string]string {
var map_ClaimSource = map[string]string{
"": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. Consumers of this type must treat an empty object as if it has an unknown value.",
"resourceClaimName": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.",
- "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The name of the ResourceClaim will be -, where is the PodResourceClaim.Name. Pod validation will reject the pod if the concatenated name is not valid for a ResourceClaim (e.g. too long).\n\nAn existing ResourceClaim with that name that is not owned by the pod will not be used for the pod to avoid using an unrelated resource by mistake. Scheduling and pod startup are then blocked until the unrelated ResourceClaim is removed.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.",
+ "resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.",
}
func (ClaimSource) SwaggerDoc() map[string]string {
@@ -347,6 +347,7 @@ var map_Container = map[string]string{
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
"resizePolicy": "Resources resize policy for the container.",
+ "restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
"volumeDevices": "volumeDevices is the list of block devices to be used by the container.",
"livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
@@ -530,7 +531,7 @@ var map_EndpointPort = map[string]string{
"name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
"port": "The port number of the endpoint.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
- "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
+ "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
}
func (EndpointPort) SwaggerDoc() map[string]string {
@@ -623,6 +624,7 @@ var map_EphemeralContainerCommon = map[string]string{
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
"resizePolicy": "Resources resize policy for the container.",
+ "restartPolicy": "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.",
"volumeDevices": "volumeDevices is the list of block devices to be used by the container.",
"livenessProbe": "Probes are not allowed for ephemeral containers.",
@@ -850,6 +852,15 @@ func (HostAlias) SwaggerDoc() map[string]string {
return map_HostAlias
}
+var map_HostIP = map[string]string{
+ "": "HostIP represents a single IP address allocated to the host.",
+ "ip": "IP is the IP address assigned to the host",
+}
+
+func (HostIP) SwaggerDoc() map[string]string {
+ return map_HostIP
+}
+
var map_HostPathVolumeSource = map[string]string{
"": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.",
"path": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
@@ -1344,13 +1355,13 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
}
var map_PersistentVolumeClaimStatus = map[string]string{
- "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
- "phase": "phase represents the current phase of PersistentVolumeClaim.",
- "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
- "capacity": "capacity represents the actual resources of the underlying volume.",
- "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
- "allocatedResources": "allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
- "resizeStatus": "resizeStatus stores status of resize operation. ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty string by resize controller or kubelet. This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
+ "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
+ "phase": "phase represents the current phase of PersistentVolumeClaim.",
+ "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
+ "capacity": "capacity represents the actual resources of the underlying volume.",
+ "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
+ "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
+ "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
}
func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
@@ -1434,10 +1445,11 @@ func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
}
var map_PersistentVolumeStatus = map[string]string{
- "": "PersistentVolumeStatus is the current status of a persistent volume.",
- "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
- "message": "message is a human-readable message indicating details about why the volume is in this state.",
- "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
+ "": "PersistentVolumeStatus is the current status of a persistent volume.",
+ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
+ "message": "message is a human-readable message indicating details about why the volume is in this state.",
+ "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
+ "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.",
}
func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
@@ -1559,8 +1571,8 @@ func (PodExecOptions) SwaggerDoc() map[string]string {
}
var map_PodIP = map[string]string{
- "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n\n\tIP: An IP address allocated to the pod. Routable at least within the cluster.",
- "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod",
+ "": "PodIP represents a single IP address allocated to the pod.",
+ "ip": "IP is the IP address assigned to the pod",
}
func (PodIP) SwaggerDoc() map[string]string {
@@ -1640,6 +1652,16 @@ func (PodResourceClaim) SwaggerDoc() map[string]string {
return map_PodResourceClaim
}
+var map_PodResourceClaimStatus = map[string]string{
+ "": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.",
+ "name": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.",
+ "resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. It this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.",
+}
+
+func (PodResourceClaimStatus) SwaggerDoc() map[string]string {
+ return map_PodResourceClaimStatus
+}
+
var map_PodSchedulingGate = map[string]string{
"": "PodSchedulingGate is associated to a Pod to guard its scheduling.",
"name": "Name of the scheduling gate. Each scheduling gate must have a unique name field.",
@@ -1730,8 +1752,9 @@ var map_PodStatus = map[string]string{
"message": "A human readable message indicating details about why the pod is in this condition.",
"reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
"nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
- "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.",
- "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
+ "hostIP": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
+ "hostIPs": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
+ "podIP": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
"podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
"startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
"initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
@@ -1739,6 +1762,7 @@ var map_PodStatus = map[string]string{
"qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
"ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod.",
"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"",
+ "resourceClaimStatuses": "Status of resource claims.",
}
func (PodStatus) SwaggerDoc() map[string]string {
@@ -2134,7 +2158,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string {
var map_SeccompProfile = map[string]string{
"": "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.",
"type": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.",
- "localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".",
+ "localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type.",
}
func (SeccompProfile) SwaggerDoc() map[string]string {
@@ -2301,7 +2325,7 @@ var map_ServicePort = map[string]string{
"": "ServicePort contains information on service's port.",
"name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
"protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
- "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.",
+ "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
"port": "The port that will be exposed by this service.",
"targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
"nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
@@ -2329,7 +2353,7 @@ var map_ServiceSpec = map[string]string{
"type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types",
"externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.",
"sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
- "loadBalancerIP": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations, and it cannot support dual-stack. As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. This field may be removed in a future API version.",
+ "loadBalancerIP": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available.",
"loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/",
"externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".",
"externalTrafficPolicy": "externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.",
@@ -2612,7 +2636,7 @@ var map_WindowsSecurityContextOptions = map[string]string{
"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.",
"gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.",
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
- "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.",
+ "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.",
}
func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {
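The hostProcess/hostNetwork coupling described above is easiest to see on a pod spec. A hedged sketch, illustrative only and not part of the vendored code (the package name, image, and the k8s.io/utils/pointer helpers are assumptions):

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

// hostProcessPodSpec runs its container as a Windows HostProcess container;
// HostNetwork must be true whenever HostProcess is true.
func hostProcessPodSpec() corev1.PodSpec {
	return corev1.PodSpec{
		HostNetwork: true,
		SecurityContext: &corev1.PodSecurityContext{
			WindowsOptions: &corev1.WindowsSecurityContextOptions{
				HostProcess:   pointer.Bool(true),
				RunAsUserName: pointer.String("NT AUTHORITY\\SYSTEM"),
			},
		},
		Containers: []corev1.Container{{Name: "agent", Image: "example.com/agent:v1"}},
	}
}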
diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go
index 5cf82a9817..8c3cb87b82 100644
--- a/vendor/k8s.io/api/core/v1/well_known_labels.go
+++ b/vendor/k8s.io/api/core/v1/well_known_labels.go
@@ -19,6 +19,10 @@ package v1
const (
LabelHostname = "kubernetes.io/hostname"
+ // Label value is the network location of kube-apiserver stored as <ip:port>
+ // Stored in APIServer Identity lease objects to view what address is used for peer proxy
+ AnnotationPeerAdvertiseAddress = "kubernetes.io/peer-advertise-address"
+
LabelTopologyZone = "topology.kubernetes.io/zone"
LabelTopologyRegion = "topology.kubernetes.io/region"
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index bfb7e0bff5..d76f0bbbcf 100644
--- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -793,6 +793,11 @@ func (in *Container) DeepCopyInto(out *Container) {
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
+ if in.RestartPolicy != nil {
+ in, out := &in.RestartPolicy, &out.RestartPolicy
+ *out = new(ContainerRestartPolicy)
+ **out = **in
+ }
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@@ -1420,6 +1425,11 @@ func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon)
*out = make([]ContainerResizePolicy, len(*in))
copy(*out, *in)
}
+ if in.RestartPolicy != nil {
+ in, out := &in.RestartPolicy, &out.RestartPolicy
+ *out = new(ContainerRestartPolicy)
+ **out = **in
+ }
if in.VolumeMounts != nil {
in, out := &in.VolumeMounts, &out.VolumeMounts
*out = make([]VolumeMount, len(*in))
@@ -1871,6 +1881,22 @@ func (in *HostAlias) DeepCopy() *HostAlias {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostIP) DeepCopyInto(out *HostIP) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostIP.
+func (in *HostIP) DeepCopy() *HostIP {
+ if in == nil {
+ return nil
+ }
+ out := new(HostIP)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) {
*out = *in
@@ -2895,7 +2921,7 @@ func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
- out.Status = in.Status
+ in.Status.DeepCopyInto(&out.Status)
return
}
@@ -3072,10 +3098,12 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt
(*out)[key] = val.DeepCopy()
}
}
- if in.ResizeStatus != nil {
- in, out := &in.ResizeStatus, &out.ResizeStatus
- *out = new(PersistentVolumeClaimResizeStatus)
- **out = **in
+ if in.AllocatedResourceStatuses != nil {
+ in, out := &in.AllocatedResourceStatuses, &out.AllocatedResourceStatuses
+ *out = make(map[ResourceName]ClaimResourceStatus, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
}
return
}
@@ -3335,6 +3363,10 @@ func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
*out = *in
+ if in.LastPhaseTransitionTime != nil {
+ in, out := &in.LastPhaseTransitionTime, &out.LastPhaseTransitionTime
+ *out = (*in).DeepCopy()
+ }
return
}
@@ -3807,6 +3839,27 @@ func (in *PodResourceClaim) DeepCopy() *PodResourceClaim {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodResourceClaimStatus) DeepCopyInto(out *PodResourceClaimStatus) {
+ *out = *in
+ if in.ResourceClaimName != nil {
+ in, out := &in.ResourceClaimName, &out.ResourceClaimName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaimStatus.
+func (in *PodResourceClaimStatus) DeepCopy() *PodResourceClaimStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PodResourceClaimStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) {
*out = *in
@@ -4091,6 +4144,11 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.HostIPs != nil {
+ in, out := &in.HostIPs, &out.HostIPs
+ *out = make([]HostIP, len(*in))
+ copy(*out, *in)
+ }
if in.PodIPs != nil {
in, out := &in.PodIPs, &out.PodIPs
*out = make([]PodIP, len(*in))
@@ -4121,6 +4179,13 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.ResourceClaimStatuses != nil {
+ in, out := &in.ResourceClaimStatuses, &out.ResourceClaimStatuses
+ *out = make([]PodResourceClaimStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto
index b7150ef2cb..490ce89224 100644
--- a/vendor/k8s.io/api/discovery/v1/generated.proto
+++ b/vendor/k8s.io/api/discovery/v1/generated.proto
@@ -146,6 +146,8 @@ message EndpointPort {
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
// * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go
index 9b4daafca9..efbb09918c 100644
--- a/vendor/k8s.io/api/discovery/v1/types.go
+++ b/vendor/k8s.io/api/discovery/v1/types.go
@@ -196,6 +196,8 @@ type EndpointPort struct {
//
// * Kubernetes-defined prefixed names:
// * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
//
// * Other protocols should use implementation-defined prefixed names such as
// mycompany.com/my-custom-protocol.
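The two new Kubernetes-defined appProtocol values added here are set on an EndpointSlice port like any other protocol hint. A minimal sketch, illustrative only (the k8s.io/utils/pointer helpers and the port number are assumptions):

package example

import (
	discoveryv1 "k8s.io/api/discovery/v1"
	"k8s.io/utils/pointer"
)

// websocketPort advertises cleartext WebSocket (RFC 6455) for this endpoint port.
func websocketPort() discoveryv1.EndpointPort {
	return discoveryv1.EndpointPort{
		Name:        pointer.String("ws"),
		Port:        pointer.Int32(8080),
		AppProtocol: pointer.String("kubernetes.io/ws"),
	}
}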
diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
index c780c9573d..bef7745398 100644
--- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
@@ -68,7 +68,7 @@ var map_EndpointPort = map[string]string{
"name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
"protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
"port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
- "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
+ "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
}
func (EndpointPort) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
index 863ebbc4a7..d967e38106 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
@@ -1001,38 +1001,10 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() {
var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo
-func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} }
-func (*NetworkPolicyStatus) ProtoMessage() {}
-func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{34}
-}
-func (m *NetworkPolicyStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyStatus.Merge(m, src)
-}
-func (m *NetworkPolicyStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo
-
func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
func (*ReplicaSet) ProtoMessage() {}
func (*ReplicaSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{35}
+ return fileDescriptor_cdc93917efc28165, []int{34}
}
func (m *ReplicaSet) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1060,7 +1032,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo
func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
func (*ReplicaSetCondition) ProtoMessage() {}
func (*ReplicaSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{36}
+ return fileDescriptor_cdc93917efc28165, []int{35}
}
func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1088,7 +1060,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo
func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
func (*ReplicaSetList) ProtoMessage() {}
func (*ReplicaSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{37}
+ return fileDescriptor_cdc93917efc28165, []int{36}
}
func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1116,7 +1088,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo
func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
func (*ReplicaSetSpec) ProtoMessage() {}
func (*ReplicaSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{38}
+ return fileDescriptor_cdc93917efc28165, []int{37}
}
func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1144,7 +1116,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo
func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
func (*ReplicaSetStatus) ProtoMessage() {}
func (*ReplicaSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{39}
+ return fileDescriptor_cdc93917efc28165, []int{38}
}
func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1172,7 +1144,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo
func (m *RollbackConfig) Reset() { *m = RollbackConfig{} }
func (*RollbackConfig) ProtoMessage() {}
func (*RollbackConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{40}
+ return fileDescriptor_cdc93917efc28165, []int{39}
}
func (m *RollbackConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1200,7 +1172,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo
func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
func (*RollingUpdateDaemonSet) ProtoMessage() {}
func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{41}
+ return fileDescriptor_cdc93917efc28165, []int{40}
}
func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1228,7 +1200,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo
func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
func (*RollingUpdateDeployment) ProtoMessage() {}
func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{42}
+ return fileDescriptor_cdc93917efc28165, []int{41}
}
func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1256,7 +1228,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
func (m *Scale) Reset() { *m = Scale{} }
func (*Scale) ProtoMessage() {}
func (*Scale) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{43}
+ return fileDescriptor_cdc93917efc28165, []int{42}
}
func (m *Scale) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1284,7 +1256,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo
func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
func (*ScaleSpec) ProtoMessage() {}
func (*ScaleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{44}
+ return fileDescriptor_cdc93917efc28165, []int{43}
}
func (m *ScaleSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1312,7 +1284,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo
func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
func (*ScaleStatus) ProtoMessage() {}
func (*ScaleStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_cdc93917efc28165, []int{45}
+ return fileDescriptor_cdc93917efc28165, []int{44}
}
func (m *ScaleStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1373,7 +1345,6 @@ func init() {
proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer")
proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort")
proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec")
- proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyStatus")
proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet")
proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition")
proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList")
@@ -1393,188 +1364,186 @@ func init() {
}
var fileDescriptor_cdc93917efc28165 = []byte{
- // 2890 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47,
- 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76,
- 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x33, 0xc3, 0x6e, 0x92, 0x25, 0x3f, 0xa4, 0x84, 0x1d, 0xef, 0x26,
- 0xeb, 0xc4, 0x1e, 0x4f, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9,
- 0x1e, 0x75, 0xd7, 0x98, 0x35, 0x27, 0x10, 0x5c, 0x72, 0x82, 0x4b, 0x20, 0x47, 0x10, 0x12, 0x57,
- 0xae, 0x1c, 0x42, 0x04, 0x22, 0x48, 0x2b, 0xc4, 0x21, 0x12, 0x07, 0x72, 0xb2, 0x88, 0x73, 0x42,
- 0xfc, 0x03, 0x68, 0x4f, 0xa8, 0x7e, 0x74, 0xf5, 0x6f, 0xbb, 0xc7, 0x38, 0x16, 0x41, 0x9c, 0x3c,
- 0x5d, 0xef, 0xbd, 0x4f, 0xbd, 0xaa, 0x7a, 0xf5, 0xde, 0xa7, 0xba, 0xda, 0xf0, 0xf2, 0xee, 0xb3,
- 0x7e, 0xcd, 0x72, 0xeb, 0xbb, 0xc3, 0x2d, 0xe2, 0x39, 0x84, 0x12, 0xbf, 0xbe, 0x47, 0x9c, 0xae,
- 0xeb, 0xd5, 0xa5, 0xc0, 0x18, 0x58, 0x75, 0x72, 0x8f, 0x12, 0xc7, 0xb7, 0x5c, 0xc7, 0xaf, 0xef,
- 0x5d, 0xdf, 0x22, 0xd4, 0xb8, 0x5e, 0xef, 0x11, 0x87, 0x78, 0x06, 0x25, 0xdd, 0xda, 0xc0, 0x73,
- 0xa9, 0x8b, 0x1e, 0x11, 0xea, 0x35, 0x63, 0x60, 0xd5, 0x42, 0xf5, 0x9a, 0x54, 0x5f, 0x7c, 0xb2,
- 0x67, 0xd1, 0x9d, 0xe1, 0x56, 0xcd, 0x74, 0xfb, 0xf5, 0x9e, 0xdb, 0x73, 0xeb, 0xdc, 0x6a, 0x6b,
- 0xb8, 0xcd, 0x9f, 0xf8, 0x03, 0xff, 0x25, 0xd0, 0x16, 0xf5, 0x48, 0xe7, 0xa6, 0xeb, 0x91, 0xfa,
- 0x5e, 0xaa, 0xc7, 0xc5, 0xa7, 0x43, 0x9d, 0xbe, 0x61, 0xee, 0x58, 0x0e, 0xf1, 0xf6, 0xeb, 0x83,
- 0xdd, 0x1e, 0x6b, 0xf0, 0xeb, 0x7d, 0x42, 0x8d, 0x2c, 0xab, 0x7a, 0x9e, 0x95, 0x37, 0x74, 0xa8,
- 0xd5, 0x27, 0x29, 0x83, 0x9b, 0xc7, 0x19, 0xf8, 0xe6, 0x0e, 0xe9, 0x1b, 0x29, 0xbb, 0xa7, 0xf2,
- 0xec, 0x86, 0xd4, 0xb2, 0xeb, 0x96, 0x43, 0x7d, 0xea, 0x25, 0x8d, 0xf4, 0xf7, 0x4a, 0x30, 0x79,
- 0xdb, 0x20, 0x7d, 0xd7, 0x69, 0x13, 0x8a, 0xbe, 0x03, 0x55, 0x36, 0x8c, 0xae, 0x41, 0x8d, 0x05,
- 0xed, 0x51, 0xed, 0xea, 0xd4, 0x8d, 0xaf, 0xd6, 0xc2, 0x69, 0x56, 0xa8, 0xb5, 0xc1, 0x6e, 0x8f,
- 0x35, 0xf8, 0x35, 0xa6, 0x5d, 0xdb, 0xbb, 0x5e, 0xdb, 0xd8, 0x7a, 0x87, 0x98, 0x74, 0x9d, 0x50,
- 0xa3, 0x81, 0xee, 0x1f, 0x2c, 0x9f, 0x3b, 0x3c, 0x58, 0x86, 0xb0, 0x0d, 0x2b, 0x54, 0xd4, 0x84,
- 0x31, 0x7f, 0x40, 0xcc, 0x85, 0x12, 0x47, 0xbf, 0x56, 0x3b, 0x72, 0x11, 0x6b, 0xca, 0xb3, 0xf6,
- 0x80, 0x98, 0x8d, 0xf3, 0x12, 0x79, 0x8c, 0x3d, 0x61, 0x8e, 0x83, 0xde, 0x80, 0x71, 0x9f, 0x1a,
- 0x74, 0xe8, 0x2f, 0x94, 0x39, 0x62, 0xad, 0x30, 0x22, 0xb7, 0x6a, 0xcc, 0x48, 0xcc, 0x71, 0xf1,
- 0x8c, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83,
- 0x9e, 0x87, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x8f, 0x05, 0x0e, 0x75, 0xf6, 0x07,
- 0xe4, 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e,
- 0xfd, 0x74, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e,
- 0x20, 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0x27, 0x8a,
- 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x1e, 0x83,
- 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29,
- 0x7a, 0x1c, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1,
- 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd0, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xcd,
- 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91,
- 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43,
- 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xa7, 0x63, 0x11, 0xf7, 0x59,
- 0x68, 0xa2, 0xb7, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x15, 0x74, 0xdf,
- 0xd8, 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x27, 0xac, 0x20, 0xd1, 0xeb, 0x50,
- 0xa5, 0xa4, 0x3f, 0xb0, 0x0d, 0x4a, 0xe4, 0x3e, 0xfa, 0xff, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0,
- 0x96, 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98,
- 0x19, 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c,
- 0xc6, 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe,
- 0xe5, 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97,
- 0x00, 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0x57, 0x01, 0x05, 0xc3, 0x78, 0x45, 0x24, 0x37,
- 0xcb, 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39,
- 0x8f, 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2,
- 0x38, 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xb3, 0x71,
- 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x06, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7,
- 0x88, 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c,
- 0x79, 0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3,
- 0xc4, 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91,
- 0x6e, 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x03, 0x53, 0xa2,
- 0x37, 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34,
- 0x77, 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1,
- 0x89, 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58,
- 0xb8, 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5,
- 0x38, 0xa9, 0x8f, 0x5e, 0x81, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2c,
- 0x41, 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x9e, 0x87, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f,
- 0x2b, 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26,
- 0x22, 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55,
- 0xc8, 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x34, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0xa5,
- 0x58, 0x89, 0xfd, 0x4a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36,
- 0x2a, 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xe6, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f,
- 0x1e, 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xbf, 0x5f, 0x02, 0xb8, 0x4d, 0x06, 0xb6,
- 0xbb, 0xdf, 0x27, 0xce, 0x59, 0x70, 0xa8, 0x8d, 0x18, 0x87, 0x7a, 0xf2, 0xb8, 0xe5, 0x51, 0xae,
- 0xe5, 0x92, 0xa8, 0x37, 0x13, 0x24, 0xaa, 0x5e, 0x1c, 0xf2, 0x68, 0x16, 0xf5, 0xd7, 0x32, 0x5c,
- 0x0a, 0x95, 0x43, 0x1a, 0xf5, 0x42, 0x6c, 0x8d, 0xbf, 0x9c, 0x58, 0xe3, 0xf9, 0x0c, 0x93, 0xcf,
- 0x8d, 0x47, 0xbd, 0x03, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5,
- 0xaa, 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa8,
- 0xc1, 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x78, 0xe1, 0x10, 0xcd, 0x61,
- 0x6d, 0xff, 0x64, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x51, 0x18, 0x73,
- 0x8c, 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0xde, 0xd3, 0x00, 0xc9,
- 0x2a, 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63,
- 0x6d, 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32,
- 0x00, 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xb2, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3,
- 0x6d, 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1,
- 0x05, 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x44, 0x73, 0x50, 0xd9, 0x33, 0xec, 0xa1,
- 0x48, 0xbf, 0x93, 0x58, 0x3c, 0x3c, 0x5f, 0x7a, 0x56, 0xd3, 0x3f, 0xa8, 0x44, 0x63, 0x87, 0x33,
- 0xe6, 0xab, 0x50, 0xf5, 0xc8, 0xc0, 0xb6, 0x4c, 0xc3, 0x97, 0x44, 0x88, 0x93, 0x5f, 0x2c, 0xdb,
- 0xb0, 0x92, 0xc6, 0xb8, 0x75, 0xe9, 0xf3, 0xe5, 0xd6, 0xe5, 0xd3, 0xe1, 0xd6, 0xdf, 0x86, 0xaa,
- 0x1f, 0xb0, 0xea, 0x31, 0x0e, 0x79, 0x7d, 0x84, 0xfc, 0x2a, 0x09, 0xb5, 0xea, 0x40, 0x51, 0x69,
- 0x05, 0x9a, 0x45, 0xa2, 0x2b, 0x23, 0x92, 0xe8, 0x53, 0x25, 0xbe, 0x2c, 0xdf, 0x0c, 0x8c, 0xa1,
- 0x4f, 0xba, 0x3c, 0xb7, 0x55, 0xc3, 0x7c, 0xd3, 0xe2, 0xad, 0x58, 0x4a, 0xd1, 0xdb, 0xb1, 0x90,
- 0xad, 0x9e, 0x24, 0x64, 0x67, 0xf2, 0xc3, 0x15, 0x6d, 0xc2, 0xfc, 0xc0, 0x73, 0x7b, 0x1e, 0xf1,
- 0xfd, 0xdb, 0xc4, 0xe8, 0xda, 0x96, 0x43, 0x82, 0xf9, 0x11, 0x8c, 0xe8, 0xca, 0xe1, 0xc1, 0xf2,
- 0x7c, 0x2b, 0x5b, 0x05, 0xe7, 0xd9, 0xea, 0xf7, 0xc7, 0xe0, 0x42, 0xb2, 0x02, 0xe6, 0x90, 0x54,
- 0xed, 0x44, 0x24, 0xf5, 0x5a, 0x64, 0x33, 0x08, 0x06, 0xaf, 0x56, 0x3f, 0x63, 0x43, 0xdc, 0x82,
- 0x59, 0x99, 0x0d, 0x02, 0xa1, 0xa4, 0xe9, 0x6a, 0xf5, 0x37, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0x2f,
- 0xc0, 0xb4, 0xc7, 0x79, 0x77, 0x00, 0x20, 0xb8, 0xeb, 0x43, 0x12, 0x60, 0x1a, 0x47, 0x85, 0x38,
- 0xae, 0xcb, 0x78, 0x6b, 0x48, 0x47, 0x03, 0x80, 0xb1, 0x38, 0x6f, 0xbd, 0x95, 0x54, 0xc0, 0x69,
- 0x1b, 0xb4, 0x0e, 0x97, 0x86, 0x4e, 0x1a, 0x4a, 0x84, 0xf2, 0x15, 0x09, 0x75, 0x69, 0x33, 0xad,
- 0x82, 0xb3, 0xec, 0xd0, 0x76, 0x8c, 0xca, 0x8e, 0xf3, 0xf4, 0x7c, 0xa3, 0xf0, 0xc6, 0x2b, 0xcc,
- 0x65, 0x33, 0xe8, 0x76, 0xb5, 0x28, 0xdd, 0xd6, 0x7f, 0xaf, 0x45, 0x8b, 0x90, 0xa2, 0xc0, 0xc7,
- 0xbd, 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2,
- 0x79, 0x3c, 0xfd, 0xfd, 0x83, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb,
- 0xa0, 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0d,
- 0x55, 0xf6, 0x97, 0x39, 0xce, 0xc3, 0x75, 0x92, 0x27, 0x99, 0x6a, 0x4b, 0xb6, 0x3d, 0x88, 0xfc,
- 0xc6, 0x4a, 0x13, 0x7d, 0x03, 0x26, 0xd8, 0xde, 0x26, 0x4e, 0xb7, 0x20, 0xf9, 0x95, 0x4e, 0x35,
- 0x84, 0x51, 0xc8, 0x67, 0x64, 0x03, 0x0e, 0xe0, 0xf4, 0x5d, 0x98, 0x8b, 0x0c, 0x02, 0x0f, 0x6d,
- 0xf2, 0x06, 0xab, 0x57, 0xa8, 0x0d, 0x15, 0xd6, 0x3b, 0xab, 0x4a, 0xe5, 0x02, 0xaf, 0x17, 0x13,
- 0x13, 0x11, 0x72, 0x0f, 0xf6, 0xe4, 0x63, 0x81, 0xa5, 0x6f, 0xc0, 0xc4, 0x6a, 0xab, 0x61, 0xbb,
- 0x82, 0x6f, 0x98, 0x56, 0xd7, 0x4b, 0xce, 0xd4, 0xca, 0xea, 0x6d, 0x8c, 0xb9, 0x04, 0xe9, 0x30,
- 0x4e, 0xee, 0x99, 0x64, 0x40, 0x39, 0xc5, 0x98, 0x6c, 0x00, 0x4b, 0xa4, 0x77, 0x78, 0x0b, 0x96,
- 0x12, 0xfd, 0xc7, 0x25, 0x98, 0x90, 0xdd, 0x9e, 0xc1, 0xf9, 0x63, 0x2d, 0x76, 0xfe, 0x78, 0xa2,
- 0xd8, 0x12, 0xe4, 0x1e, 0x3e, 0x3a, 0x89, 0xc3, 0xc7, 0xb5, 0x82, 0x78, 0x47, 0x9f, 0x3c, 0xde,
- 0x2d, 0xc1, 0x4c, 0x7c, 0xf1, 0xd1, 0x33, 0x30, 0xc5, 0x52, 0xad, 0x65, 0x92, 0x66, 0xc8, 0xf0,
- 0xd4, 0xeb, 0x87, 0x76, 0x28, 0xc2, 0x51, 0x3d, 0xd4, 0x53, 0x66, 0x2d, 0xd7, 0xa3, 0x72, 0xd0,
- 0xf9, 0x53, 0x3a, 0xa4, 0x96, 0x5d, 0x13, 0x2f, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4,
- 0xb3, 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x4d, 0x96, 0xf6, 0x7d, 0x77, 0xe8,
- 0x99, 0x24, 0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c,
- 0x4c, 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x46, 0x83,
- 0x29, 0x39, 0x17, 0x67, 0x40, 0xd4, 0x5f, 0x8b, 0x13, 0xf5, 0xc7, 0x0a, 0xee, 0xd0, 0x6c, 0x96,
- 0xfe, 0x5b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4,
- 0xfa, 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56,
- 0x45, 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d,
- 0xb4, 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e,
- 0x32, 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa0, 0xc1, 0xc3, 0x19, 0xfe, 0x4b,
- 0xd2, 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xae, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6,
- 0xb6, 0x20, 0x85, 0x05, 0xd0, 0xfa, 0x2f, 0x35, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf,
- 0x92, 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x6b, 0x50, 0xe5, 0x77, 0x44, 0xa6, 0x6b,
- 0xcb, 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03,
- 0x31, 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77,
- 0x58, 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc2, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99,
- 0x18, 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62,
- 0xd3, 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a,
- 0xe8, 0x75, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0xf1, 0xb2, 0xff, 0x98, 0x22, 0x11, 0xba, 0x50, 0xe5,
- 0xa3, 0xeb, 0x74, 0x5a, 0x98, 0x43, 0xe9, 0xbf, 0x2b, 0xa9, 0xf9, 0xe0, 0x27, 0xa4, 0xaf, 0xab,
- 0xd1, 0xae, 0xd8, 0x86, 0xef, 0xf3, 0x14, 0x26, 0x4e, 0xf3, 0x73, 0x11, 0xc7, 0x95, 0x0c, 0xa7,
- 0xb4, 0x51, 0x27, 0x2c, 0x9e, 0xda, 0x49, 0x8a, 0xe7, 0x54, 0x56, 0xe1, 0x44, 0x77, 0xa1, 0x4c,
- 0xed, 0xa2, 0xa7, 0x72, 0x89, 0xd8, 0x59, 0x6b, 0x37, 0xa6, 0xe4, 0x94, 0x97, 0x3b, 0x6b, 0x6d,
- 0xcc, 0x20, 0xd0, 0x06, 0x54, 0xbc, 0xa1, 0x4d, 0x58, 0x1d, 0x28, 0x17, 0xaf, 0x2b, 0x6c, 0x06,
- 0xc3, 0xcd, 0xc7, 0x9e, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x6a, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83,
- 0xf3, 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x76, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7,
- 0x7c, 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d,
- 0x2f, 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33,
- 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x5f, 0x94, 0x60, 0xba, 0x49, 0xe8,
- 0x77, 0x5d, 0x6f, 0xb7, 0xe5, 0xda, 0x96, 0xb9, 0x7f, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x71,
- 0xf9, 0x32, 0xe6, 0x5d, 0x2e, 0x15, 0x78, 0x2b, 0x41, 0x05, 0x6e, 0x8c, 0x84, 0x7a, 0x34, 0x21,
- 0xf8, 0x50, 0x83, 0xf9, 0x98, 0xfe, 0x9d, 0x30, 0xd7, 0xa8, 0xe4, 0xaf, 0x15, 0x4a, 0xfe, 0x31,
- 0x18, 0x96, 0x30, 0xb3, 0x93, 0x3f, 0x5a, 0x83, 0x12, 0x75, 0xe5, 0xce, 0x18, 0x0d, 0x93, 0x10,
- 0x2f, 0xac, 0x67, 0x1d, 0x17, 0x97, 0xa8, 0xab, 0xff, 0x51, 0x83, 0x85, 0x98, 0x56, 0x34, 0x5b,
- 0x7e, 0x4e, 0x23, 0xc0, 0x30, 0xb6, 0xed, 0xb9, 0xfd, 0x13, 0x8f, 0x41, 0x2d, 0xf2, 0xcb, 0x9e,
- 0xdb, 0xc7, 0x1c, 0x4b, 0xff, 0x48, 0x83, 0x8b, 0x31, 0xcd, 0x33, 0xe0, 0x24, 0xaf, 0xc7, 0x39,
- 0xc9, 0xb5, 0x51, 0x06, 0x92, 0xc3, 0x4c, 0x3e, 0x2a, 0x25, 0x86, 0xc1, 0x06, 0x8c, 0xb6, 0x61,
- 0x6a, 0xe0, 0x76, 0xdb, 0xa7, 0x70, 0xf9, 0x3b, 0xcb, 0xb8, 0x62, 0x2b, 0xc4, 0xc2, 0x51, 0x60,
- 0x74, 0x0f, 0x2e, 0x32, 0xda, 0xe2, 0x0f, 0x0c, 0x93, 0xb4, 0x4f, 0xe1, 0x75, 0xd8, 0x43, 0xfc,
- 0x76, 0x29, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc, 0xec, 0x22, 0x37, 0xe9,
- 0xb1, 0x04, 0x4f, 0x9c, 0x74, 0x44, 0xf9, 0x90, 0x0f, 0x38, 0xc0, 0xd0, 0xff, 0x92, 0x8c, 0x06,
- 0x4e, 0x85, 0x5f, 0x89, 0x50, 0x0f, 0x79, 0x0f, 0x74, 0x32, 0xda, 0xd1, 0x94, 0x2c, 0xe7, 0xa4,
- 0xac, 0xbd, 0x9a, 0xe0, 0x44, 0x5f, 0x82, 0x09, 0xe2, 0x74, 0xf9, 0x41, 0x40, 0xbc, 0x64, 0xe1,
- 0xa3, 0xba, 0x23, 0x9a, 0x70, 0x20, 0xd3, 0x7f, 0x54, 0x4e, 0x8c, 0x8a, 0x97, 0xf0, 0x77, 0x4e,
- 0x2d, 0x38, 0xd4, 0x61, 0x22, 0x37, 0x40, 0xb6, 0x42, 0x6a, 0x29, 0x62, 0xfe, 0x6b, 0xa3, 0xc4,
- 0x7c, 0xb4, 0xb6, 0xe6, 0x12, 0x4b, 0xf4, 0x2d, 0x18, 0x27, 0xa2, 0x0b, 0x51, 0xb1, 0x6f, 0x8e,
- 0xd2, 0x45, 0x98, 0x7e, 0xc3, 0x94, 0x2d, 0xdb, 0x24, 0x2a, 0x7a, 0x89, 0xcd, 0x17, 0xd3, 0x65,
- 0x47, 0x1e, 0xc1, 0xcc, 0x27, 0x1b, 0x8f, 0x88, 0x61, 0xab, 0xe6, 0x07, 0x07, 0xcb, 0x10, 0x3e,
- 0xe2, 0xa8, 0x85, 0xfe, 0x3d, 0xb8, 0x94, 0x51, 0x22, 0x90, 0x19, 0x7b, 0x33, 0x24, 0x32, 0x66,
- 0xbd, 0xd8, 0x32, 0x14, 0xbf, 0xe2, 0x7c, 0xbf, 0x04, 0x20, 0xdf, 0x45, 0x9d, 0xcd, 0x97, 0x55,
- 0xa3, 0xdd, 0x0a, 0x86, 0xae, 0x9d, 0xda, 0xad, 0x60, 0x04, 0xf2, 0xe8, 0x52, 0xfc, 0x8f, 0x12,
- 0x5c, 0x0a, 0x95, 0x0b, 0xdf, 0x0a, 0x66, 0x98, 0xfc, 0xef, 0xeb, 0xaa, 0x62, 0x37, 0x75, 0xe1,
- 0xd4, 0xfd, 0xe7, 0xdd, 0xd4, 0x85, 0xbe, 0xe5, 0x54, 0xda, 0x5f, 0x97, 0xa2, 0x03, 0x18, 0xf1,
- 0xba, 0xe8, 0x14, 0x3e, 0x30, 0xfa, 0xc2, 0xdd, 0x38, 0xe9, 0x7f, 0x2e, 0xc3, 0x85, 0xe4, 0x6e,
- 0x8c, 0xdd, 0x2a, 0x68, 0xc7, 0xde, 0x2a, 0xb4, 0x60, 0x6e, 0x7b, 0x68, 0xdb, 0xfb, 0x7c, 0x0c,
- 0x91, 0xab, 0x05, 0x71, 0x1f, 0xf1, 0x7f, 0xd2, 0x72, 0xee, 0xe5, 0x0c, 0x1d, 0x9c, 0x69, 0x99,
- 0xbe, 0x64, 0x18, 0xfb, 0x77, 0x2f, 0x19, 0x2a, 0x27, 0xb8, 0x64, 0xc8, 0xbe, 0xa7, 0x29, 0x9f,
- 0xe8, 0x9e, 0xe6, 0x24, 0x37, 0x0c, 0x19, 0x49, 0xec, 0xd8, 0x52, 0xf2, 0x22, 0xcc, 0xc4, 0x6f,
- 0xbd, 0xc4, 0x5a, 0x8a, 0x8b, 0x37, 0x79, 0xc7, 0x14, 0x59, 0x4b, 0xd1, 0x8e, 0x95, 0x86, 0x7e,
- 0xa8, 0xc1, 0xe5, 0xec, 0xaf, 0x5b, 0x90, 0x0d, 0x33, 0x7d, 0xe3, 0x5e, 0xf4, 0x8b, 0x23, 0xed,
- 0x84, 0x4c, 0x89, 0x5f, 0x77, 0xac, 0xc7, 0xb0, 0x70, 0x02, 0x1b, 0xbd, 0x05, 0xd5, 0xbe, 0x71,
- 0xaf, 0x3d, 0xf4, 0x7a, 0xe4, 0xc4, 0x8c, 0x8c, 0x6f, 0xa3, 0x75, 0x89, 0x82, 0x15, 0x9e, 0xfe,
- 0x99, 0x06, 0xf3, 0x39, 0x97, 0x18, 0xff, 0x45, 0xa3, 0x7c, 0xb7, 0x04, 0x95, 0xb6, 0x69, 0xd8,
- 0xe4, 0x0c, 0x08, 0xc5, 0xab, 0x31, 0x42, 0x71, 0xdc, 0x57, 0xb2, 0xdc, 0xab, 0x5c, 0x2e, 0x81,
- 0x13, 0x5c, 0xe2, 0x89, 0x42, 0x68, 0x47, 0xd3, 0x88, 0xe7, 0x60, 0x52, 0x75, 0x3a, 0x5a, 0x76,
- 0xd3, 0x7f, 0x5e, 0x82, 0xa9, 0x48, 0x17, 0x23, 0xe6, 0xc6, 0xed, 0x58, 0x41, 0x28, 0x17, 0x78,
- 0x83, 0x14, 0xe9, 0xab, 0x16, 0x94, 0x00, 0xf1, 0x95, 0x47, 0x78, 0xaf, 0x9f, 0xae, 0x0c, 0x2f,
- 0xc2, 0x0c, 0x35, 0xbc, 0x1e, 0xa1, 0xea, 0xc8, 0x20, 0x5e, 0x9e, 0xaa, 0xcf, 0x8d, 0x3a, 0x31,
- 0x29, 0x4e, 0x68, 0x2f, 0xbe, 0x00, 0xd3, 0xb1, 0xce, 0x46, 0xf9, 0x48, 0xa3, 0xb1, 0x72, 0xff,
- 0xd3, 0xa5, 0x73, 0x1f, 0x7f, 0xba, 0x74, 0xee, 0x93, 0x4f, 0x97, 0xce, 0x7d, 0xff, 0x70, 0x49,
- 0xbb, 0x7f, 0xb8, 0xa4, 0x7d, 0x7c, 0xb8, 0xa4, 0x7d, 0x72, 0xb8, 0xa4, 0xfd, 0xed, 0x70, 0x49,
- 0xfb, 0xc9, 0x67, 0x4b, 0xe7, 0xde, 0x7a, 0xe4, 0xc8, 0xff, 0xd9, 0xf8, 0x57, 0x00, 0x00, 0x00,
- 0xff, 0xff, 0x39, 0x36, 0x95, 0x55, 0xec, 0x31, 0x00, 0x00,
+ // 2858 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x47,
+ 0x15, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0x2f, 0xb1, 0xa3,
+ 0x46, 0x84, 0x4d, 0xd8, 0xcc, 0xb0, 0x9b, 0x64, 0xc9, 0x87, 0x94, 0xb0, 0xe3, 0xdd, 0x64, 0x9d,
+ 0xd8, 0xe3, 0x49, 0xcd, 0x38, 0x41, 0x11, 0x01, 0xda, 0x3d, 0xe5, 0x71, 0xc7, 0x3d, 0xdd, 0xa3,
+ 0xee, 0x1a, 0xb3, 0xbe, 0x81, 0xe0, 0x92, 0x13, 0x5c, 0x02, 0x1c, 0x91, 0x90, 0xb8, 0x72, 0xe5,
+ 0x10, 0x22, 0x10, 0x41, 0x5a, 0x21, 0x0e, 0x91, 0x38, 0x90, 0x93, 0x45, 0x9c, 0x13, 0xe2, 0x1f,
+ 0x40, 0x7b, 0x42, 0xf5, 0xd1, 0xd5, 0xdf, 0x76, 0x8f, 0xf1, 0x5a, 0x04, 0x71, 0x5a, 0x4f, 0xbd,
+ 0xf7, 0x7e, 0xf5, 0xaa, 0xea, 0xd5, 0x7b, 0xbf, 0xaa, 0xea, 0x85, 0x57, 0x77, 0x9f, 0xf7, 0x6b,
+ 0x96, 0x5b, 0xdf, 0x1d, 0x6e, 0x11, 0xcf, 0x21, 0x94, 0xf8, 0xf5, 0x3d, 0xe2, 0x74, 0x5d, 0xaf,
+ 0x2e, 0x05, 0xc6, 0xc0, 0xaa, 0x93, 0x7b, 0x94, 0x38, 0xbe, 0xe5, 0x3a, 0x7e, 0x7d, 0xef, 0xfa,
+ 0x16, 0xa1, 0xc6, 0xf5, 0x7a, 0x8f, 0x38, 0xc4, 0x33, 0x28, 0xe9, 0xd6, 0x06, 0x9e, 0x4b, 0x5d,
+ 0xf4, 0x98, 0x50, 0xaf, 0x19, 0x03, 0xab, 0x16, 0xaa, 0xd7, 0xa4, 0xfa, 0xe2, 0xd3, 0x3d, 0x8b,
+ 0xee, 0x0c, 0xb7, 0x6a, 0xa6, 0xdb, 0xaf, 0xf7, 0xdc, 0x9e, 0x5b, 0xe7, 0x56, 0x5b, 0xc3, 0x6d,
+ 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x09, 0xb4, 0x45, 0x3d, 0xd2, 0xb9, 0xe9, 0x7a, 0xa4, 0xbe, 0x97,
+ 0xea, 0x71, 0xf1, 0xd9, 0x50, 0xa7, 0x6f, 0x98, 0x3b, 0x96, 0x43, 0xbc, 0xfd, 0xfa, 0x60, 0xb7,
+ 0xc7, 0x1a, 0xfc, 0x7a, 0x9f, 0x50, 0x23, 0xcb, 0xaa, 0x9e, 0x67, 0xe5, 0x0d, 0x1d, 0x6a, 0xf5,
+ 0x49, 0xca, 0xe0, 0xe6, 0x71, 0x06, 0xbe, 0xb9, 0x43, 0xfa, 0x46, 0xca, 0xee, 0x99, 0x3c, 0xbb,
+ 0x21, 0xb5, 0xec, 0xba, 0xe5, 0x50, 0x9f, 0x7a, 0x49, 0x23, 0xfd, 0x83, 0x12, 0x4c, 0xde, 0x36,
+ 0x48, 0xdf, 0x75, 0xda, 0x84, 0xa2, 0xef, 0x41, 0x95, 0x0d, 0xa3, 0x6b, 0x50, 0x63, 0x41, 0x7b,
+ 0x5c, 0xbb, 0x3a, 0x75, 0xe3, 0xeb, 0xb5, 0x70, 0x9a, 0x15, 0x6a, 0x6d, 0xb0, 0xdb, 0x63, 0x0d,
+ 0x7e, 0x8d, 0x69, 0xd7, 0xf6, 0xae, 0xd7, 0x36, 0xb6, 0xde, 0x23, 0x26, 0x5d, 0x27, 0xd4, 0x68,
+ 0xa0, 0xfb, 0x07, 0xcb, 0xe7, 0x0e, 0x0f, 0x96, 0x21, 0x6c, 0xc3, 0x0a, 0x15, 0x35, 0x61, 0xcc,
+ 0x1f, 0x10, 0x73, 0xa1, 0xc4, 0xd1, 0xaf, 0xd5, 0x8e, 0x5c, 0xc4, 0x9a, 0xf2, 0xac, 0x3d, 0x20,
+ 0x66, 0xe3, 0xbc, 0x44, 0x1e, 0x63, 0xbf, 0x30, 0xc7, 0x41, 0x6f, 0xc1, 0xb8, 0x4f, 0x0d, 0x3a,
+ 0xf4, 0x17, 0xca, 0x1c, 0xb1, 0x56, 0x18, 0x91, 0x5b, 0x35, 0x66, 0x24, 0xe6, 0xb8, 0xf8, 0x8d,
+ 0x25, 0x9a, 0xfe, 0x8f, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x5d, 0x8b, 0x5a, 0xae, 0x83, 0x5e,
+ 0x84, 0x31, 0xba, 0x3f, 0x20, 0x7c, 0x72, 0x26, 0x1b, 0x4f, 0x04, 0x0e, 0x75, 0xf6, 0x07, 0xe4,
+ 0xc1, 0xc1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25, 0x6e, 0xfd,
+ 0x6c, 0xbc, 0xeb, 0x07, 0x07, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1, 0x1e, 0x20,
+ 0xdb, 0xf0, 0x69, 0xc7, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x89, 0x9c, 0x84, 0xa7, 0x8a, 0x2d,
+ 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x9e, 0x80, 0x71,
+ 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b, 0x29, 0x7a,
+ 0x12, 0x26, 0xfa, 0xc4, 0xf7, 0x8d, 0x1e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38, 0xb1, 0x2e,
+ 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd4, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4, 0xed, 0x54,
+ 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d, 0x91, 0x18,
+ 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x9f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0x8d, 0xab, 0x45, 0x43, 0xa6,
+ 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0x67, 0x63, 0x11, 0xf7, 0x59, 0x68,
+ 0xa2, 0x77, 0xa1, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0xcf, 0x14, 0x74, 0xdf, 0xd8,
+ 0x22, 0x76, 0x5b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x5f, 0x58, 0x41, 0xa2, 0x37, 0xa1, 0x4a,
+ 0x49, 0x7f, 0x60, 0x1b, 0x94, 0xc8, 0x7d, 0xf4, 0xe5, 0xe8, 0x10, 0x58, 0xe4, 0x30, 0xb0, 0x96,
+ 0xdb, 0xed, 0x48, 0x35, 0xbe, 0x7d, 0xd4, 0x94, 0x04, 0xad, 0x58, 0xc1, 0xa0, 0x3d, 0x98, 0x19,
+ 0x0e, 0xba, 0x4c, 0x93, 0xb2, 0xec, 0xd0, 0xdb, 0x97, 0x91, 0x74, 0xb3, 0xe8, 0xdc, 0x6c, 0xc6,
+ 0xac, 0x1b, 0x97, 0x65, 0x5f, 0x33, 0xf1, 0x76, 0x9c, 0xe8, 0x05, 0xdd, 0x82, 0xd9, 0xbe, 0xe5,
+ 0x60, 0x62, 0x74, 0xf7, 0xdb, 0xc4, 0x74, 0x9d, 0xae, 0xcf, 0xc3, 0xaa, 0xd2, 0x98, 0x97, 0x00,
+ 0xb3, 0xeb, 0x71, 0x31, 0x4e, 0xea, 0xa3, 0xd7, 0x01, 0x05, 0xc3, 0x78, 0x4d, 0x24, 0x37, 0xcb,
+ 0x75, 0x78, 0xcc, 0x95, 0xc3, 0xe0, 0xee, 0xa4, 0x34, 0x70, 0x86, 0x15, 0x5a, 0x83, 0x39, 0x8f,
+ 0xec, 0x59, 0x6c, 0x8c, 0x77, 0x2d, 0x9f, 0xba, 0xde, 0xfe, 0x9a, 0xd5, 0xb7, 0xe8, 0xc2, 0x38,
+ 0xf7, 0x69, 0xe1, 0xf0, 0x60, 0x79, 0x0e, 0x67, 0xc8, 0x71, 0xa6, 0x95, 0xfe, 0xf3, 0x71, 0x98,
+ 0x4d, 0xe4, 0x1b, 0xf4, 0x16, 0x5c, 0x36, 0x87, 0x9e, 0x47, 0x1c, 0xda, 0x1c, 0xf6, 0xb7, 0x88,
+ 0xd7, 0x36, 0x77, 0x48, 0x77, 0x68, 0x93, 0x2e, 0x0f, 0x94, 0x4a, 0x63, 0x49, 0x7a, 0x7c, 0x79,
+ 0x25, 0x53, 0x0b, 0xe7, 0x58, 0xb3, 0x59, 0x70, 0x78, 0xd3, 0xba, 0xe5, 0xfb, 0x0a, 0xb3, 0xc4,
+ 0x31, 0xd5, 0x2c, 0x34, 0x53, 0x1a, 0x38, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xbe, 0xe5, 0x91, 0x6e,
+ 0xd2, 0xc7, 0x72, 0xdc, 0xc7, 0xdb, 0x99, 0x5a, 0x38, 0xc7, 0x1a, 0x3d, 0x07, 0x53, 0xa2, 0x37,
+ 0xbe, 0x7e, 0x72, 0xa1, 0x2f, 0x49, 0xb0, 0xa9, 0x66, 0x28, 0xc2, 0x51, 0x3d, 0x36, 0x34, 0x77,
+ 0xcb, 0x27, 0xde, 0x1e, 0xe9, 0xe6, 0x2f, 0xf0, 0x46, 0x4a, 0x03, 0x67, 0x58, 0xb1, 0xa1, 0x89,
+ 0x08, 0x4c, 0x0d, 0x6d, 0x3c, 0x3e, 0xb4, 0xcd, 0x4c, 0x2d, 0x9c, 0x63, 0xcd, 0xe2, 0x58, 0xb8,
+ 0x7c, 0x6b, 0xcf, 0xb0, 0x6c, 0x63, 0xcb, 0x26, 0x0b, 0x13, 0xf1, 0x38, 0x6e, 0xc6, 0xc5, 0x38,
+ 0xa9, 0x8f, 0x5e, 0x83, 0x8b, 0xa2, 0x69, 0xd3, 0x31, 0x14, 0x48, 0x95, 0x83, 0x3c, 0x2a, 0x41,
+ 0x2e, 0x36, 0x93, 0x0a, 0x38, 0x6d, 0x83, 0x5e, 0x84, 0x19, 0xd3, 0xb5, 0x6d, 0x1e, 0x8f, 0x2b,
+ 0xee, 0xd0, 0xa1, 0x0b, 0x93, 0x1c, 0x05, 0xb1, 0xfd, 0xb8, 0x12, 0x93, 0xe0, 0x84, 0x26, 0x22,
+ 0x00, 0x66, 0x50, 0x70, 0xfc, 0x05, 0xe0, 0xf9, 0xf1, 0x7a, 0xd1, 0x1c, 0xa0, 0x4a, 0x55, 0xc8,
+ 0x01, 0x54, 0x93, 0x8f, 0x23, 0xc0, 0xfa, 0x9f, 0x35, 0x98, 0xcf, 0x49, 0x1d, 0xe8, 0x95, 0x58,
+ 0x89, 0xfd, 0x5a, 0xa2, 0xc4, 0x5e, 0xc9, 0x31, 0x8b, 0xd4, 0x59, 0x07, 0xa6, 0x3d, 0x36, 0x2a,
+ 0xa7, 0x27, 0x54, 0x64, 0x8e, 0x7c, 0xee, 0x98, 0x61, 0xe0, 0xa8, 0x4d, 0x98, 0xf3, 0x2f, 0x1e,
+ 0x1e, 0x2c, 0x4f, 0xc7, 0x64, 0x38, 0x0e, 0xaf, 0xff, 0xa2, 0x04, 0x70, 0x9b, 0x0c, 0x6c, 0x77,
+ 0xbf, 0x4f, 0x9c, 0xb3, 0xe0, 0x50, 0x1b, 0x31, 0x0e, 0xf5, 0xf4, 0x71, 0xcb, 0xa3, 0x5c, 0xcb,
+ 0x25, 0x51, 0x6f, 0x27, 0x48, 0x54, 0xbd, 0x38, 0xe4, 0xd1, 0x2c, 0xea, 0x6f, 0x65, 0xb8, 0x14,
+ 0x2a, 0x87, 0x34, 0xea, 0xa5, 0xd8, 0x1a, 0x7f, 0x35, 0xb1, 0xc6, 0xf3, 0x19, 0x26, 0x0f, 0x8d,
+ 0x47, 0xbd, 0x07, 0x33, 0x8c, 0xe5, 0x88, 0xb5, 0xe4, 0x1c, 0x6a, 0x7c, 0x64, 0x0e, 0xa5, 0xaa,
+ 0xdd, 0x5a, 0x0c, 0x09, 0x27, 0x90, 0x73, 0x38, 0xdb, 0xc4, 0x17, 0x91, 0xb3, 0x7d, 0xa4, 0xc1,
+ 0x4c, 0xb8, 0x4c, 0x67, 0x40, 0xda, 0x9a, 0x71, 0xd2, 0xf6, 0x64, 0xe1, 0x10, 0xcd, 0x61, 0x6d,
+ 0xff, 0x62, 0x04, 0x5f, 0x29, 0xb1, 0x0d, 0xbe, 0x65, 0x98, 0xbb, 0xe8, 0x71, 0x18, 0x73, 0x8c,
+ 0x7e, 0x10, 0x99, 0x6a, 0xb3, 0x34, 0x8d, 0x3e, 0xc1, 0x5c, 0x82, 0x3e, 0xd0, 0x00, 0xc9, 0x2a,
+ 0x70, 0xcb, 0x71, 0x5c, 0x6a, 0x88, 0x5c, 0x29, 0xdc, 0x5a, 0x2d, 0xec, 0x56, 0xd0, 0x63, 0x6d,
+ 0x33, 0x85, 0x75, 0xc7, 0xa1, 0xde, 0x7e, 0xb8, 0xc8, 0x69, 0x05, 0x9c, 0xe1, 0x00, 0x32, 0x00,
+ 0x3c, 0x89, 0xd9, 0x71, 0xe5, 0x46, 0x7e, 0xba, 0x40, 0xce, 0x63, 0x06, 0x2b, 0xae, 0xb3, 0x6d,
+ 0xf5, 0xc2, 0xb4, 0x83, 0x15, 0x10, 0x8e, 0x80, 0x2e, 0xde, 0x81, 0xf9, 0x1c, 0x6f, 0xd1, 0x05,
+ 0x28, 0xef, 0x92, 0x7d, 0x31, 0x6d, 0x98, 0xfd, 0x89, 0xe6, 0xa0, 0xb2, 0x67, 0xd8, 0x43, 0x91,
+ 0x7e, 0x27, 0xb1, 0xf8, 0xf1, 0x62, 0xe9, 0x79, 0x4d, 0xff, 0xb0, 0x12, 0x8d, 0x1d, 0xce, 0x98,
+ 0xaf, 0x42, 0xd5, 0x23, 0x03, 0xdb, 0x32, 0x0d, 0x5f, 0x12, 0x21, 0x4e, 0x7e, 0xb1, 0x6c, 0xc3,
+ 0x4a, 0x1a, 0xe3, 0xd6, 0xa5, 0x87, 0xcb, 0xad, 0xcb, 0xa7, 0xc3, 0xad, 0xbf, 0x0b, 0x55, 0x3f,
+ 0x60, 0xd5, 0x63, 0x1c, 0xf2, 0xfa, 0x08, 0xf9, 0x55, 0x12, 0x6a, 0xd5, 0x81, 0xa2, 0xd2, 0x0a,
+ 0x34, 0x8b, 0x44, 0x57, 0x46, 0x24, 0xd1, 0xa7, 0x4a, 0x7c, 0x59, 0xbe, 0x19, 0x18, 0x43, 0x9f,
+ 0x74, 0x79, 0x6e, 0xab, 0x86, 0xf9, 0xa6, 0xc5, 0x5b, 0xb1, 0x94, 0xa2, 0x77, 0x63, 0x21, 0x5b,
+ 0x3d, 0x49, 0xc8, 0xce, 0xe4, 0x87, 0x2b, 0xda, 0x84, 0xf9, 0x81, 0xe7, 0xf6, 0x3c, 0xe2, 0xfb,
+ 0xb7, 0x89, 0xd1, 0xb5, 0x2d, 0x87, 0x04, 0xf3, 0x23, 0x18, 0xd1, 0x95, 0xc3, 0x83, 0xe5, 0xf9,
+ 0x56, 0xb6, 0x0a, 0xce, 0xb3, 0xd5, 0xef, 0x8f, 0xc1, 0x85, 0x64, 0x05, 0xcc, 0x21, 0xa9, 0xda,
+ 0x89, 0x48, 0xea, 0xb5, 0xc8, 0x66, 0x10, 0x0c, 0x5e, 0xad, 0x7e, 0xc6, 0x86, 0xb8, 0x05, 0xb3,
+ 0x32, 0x1b, 0x04, 0x42, 0x49, 0xd3, 0xd5, 0xea, 0x6f, 0xc6, 0xc5, 0x38, 0xa9, 0x8f, 0x5e, 0x82,
+ 0x69, 0x8f, 0xf3, 0xee, 0x00, 0x40, 0x70, 0xd7, 0x47, 0x24, 0xc0, 0x34, 0x8e, 0x0a, 0x71, 0x5c,
+ 0x97, 0xf1, 0xd6, 0x90, 0x8e, 0x06, 0x00, 0x63, 0x71, 0xde, 0x7a, 0x2b, 0xa9, 0x80, 0xd3, 0x36,
+ 0x68, 0x1d, 0x2e, 0x0d, 0x9d, 0x34, 0x94, 0x08, 0xe5, 0x2b, 0x12, 0xea, 0xd2, 0x66, 0x5a, 0x05,
+ 0x67, 0xd9, 0xa1, 0xed, 0x18, 0x95, 0x1d, 0xe7, 0xe9, 0xf9, 0x46, 0xe1, 0x8d, 0x57, 0x98, 0xcb,
+ 0x66, 0xd0, 0xed, 0x6a, 0x51, 0xba, 0xad, 0xff, 0x41, 0x8b, 0x16, 0x21, 0x45, 0x81, 0x8f, 0xbb,
+ 0x65, 0x4a, 0x59, 0x44, 0xd8, 0x91, 0x9b, 0xcd, 0x7e, 0x6f, 0x8e, 0xc4, 0x7e, 0xc3, 0xe2, 0x79,
+ 0x3c, 0xfd, 0xfd, 0xa3, 0x06, 0xb3, 0x77, 0x3b, 0x9d, 0xd6, 0xaa, 0xc3, 0x77, 0x4b, 0xcb, 0xa0,
+ 0x3b, 0xac, 0x8a, 0x0e, 0x0c, 0xba, 0x93, 0xac, 0xa2, 0x4c, 0x86, 0xb9, 0x04, 0x3d, 0x0b, 0x55,
+ 0xf6, 0x2f, 0x73, 0x9c, 0x87, 0xeb, 0x24, 0x4f, 0x32, 0xd5, 0x96, 0x6c, 0x7b, 0x10, 0xf9, 0x1b,
+ 0x2b, 0x4d, 0xf4, 0x2d, 0x98, 0x60, 0x7b, 0x9b, 0x38, 0xdd, 0x82, 0xe4, 0x57, 0x3a, 0xd5, 0x10,
+ 0x46, 0x21, 0x9f, 0x91, 0x0d, 0x38, 0x80, 0xd3, 0x77, 0x61, 0x2e, 0x32, 0x08, 0x3c, 0xb4, 0xc9,
+ 0x5b, 0xac, 0x5e, 0xa1, 0x36, 0x54, 0x58, 0xef, 0xac, 0x2a, 0x95, 0x0b, 0x5c, 0x2f, 0x26, 0x26,
+ 0x22, 0xe4, 0x1e, 0xec, 0x97, 0x8f, 0x05, 0x96, 0xbe, 0x01, 0x13, 0xab, 0xad, 0x86, 0xed, 0x0a,
+ 0xbe, 0x61, 0x5a, 0x5d, 0x2f, 0x39, 0x53, 0x2b, 0xab, 0xb7, 0x31, 0xe6, 0x12, 0xa4, 0xc3, 0x38,
+ 0xb9, 0x67, 0x92, 0x01, 0xe5, 0x14, 0x63, 0xb2, 0x01, 0x2c, 0x91, 0xde, 0xe1, 0x2d, 0x58, 0x4a,
+ 0xf4, 0x9f, 0x94, 0x60, 0x42, 0x76, 0x7b, 0x06, 0xe7, 0x8f, 0xb5, 0xd8, 0xf9, 0xe3, 0xa9, 0x62,
+ 0x4b, 0x90, 0x7b, 0xf8, 0xe8, 0x24, 0x0e, 0x1f, 0xd7, 0x0a, 0xe2, 0x1d, 0x7d, 0xf2, 0x78, 0xbf,
+ 0x04, 0x33, 0xf1, 0xc5, 0x47, 0xcf, 0xc1, 0x14, 0x4b, 0xb5, 0x96, 0x49, 0x9a, 0x21, 0xc3, 0x53,
+ 0xd7, 0x0f, 0xed, 0x50, 0x84, 0xa3, 0x7a, 0xa8, 0xa7, 0xcc, 0x5a, 0xae, 0x47, 0xe5, 0xa0, 0xf3,
+ 0xa7, 0x74, 0x48, 0x2d, 0xbb, 0x26, 0x2e, 0xdb, 0x6b, 0xab, 0x0e, 0xdd, 0xf0, 0xda, 0xd4, 0xb3,
+ 0x9c, 0x5e, 0xaa, 0x23, 0x06, 0x86, 0xa3, 0xc8, 0xe8, 0x6d, 0x96, 0xf6, 0x7d, 0x77, 0xe8, 0x99,
+ 0x24, 0x8b, 0xbe, 0x05, 0xd4, 0x83, 0x6d, 0x84, 0xee, 0x9a, 0x6b, 0x1a, 0xb6, 0x58, 0x1c, 0x4c,
+ 0xb6, 0x89, 0x47, 0x1c, 0x93, 0x04, 0x94, 0x49, 0x40, 0x60, 0x05, 0xa6, 0xff, 0x56, 0x83, 0x29,
+ 0x39, 0x17, 0x67, 0x40, 0xd4, 0xdf, 0x88, 0x13, 0xf5, 0x27, 0x0a, 0xee, 0xd0, 0x6c, 0x96, 0xfe,
+ 0x3b, 0x0d, 0x16, 0x03, 0xd7, 0x5d, 0xa3, 0xdb, 0x30, 0x6c, 0xc3, 0x31, 0x89, 0x17, 0xc4, 0xfa,
+ 0x22, 0x94, 0xac, 0x81, 0x5c, 0x49, 0x90, 0x00, 0xa5, 0xd5, 0x16, 0x2e, 0x59, 0x03, 0x56, 0x45,
+ 0x77, 0x5c, 0x9f, 0x72, 0x36, 0x2f, 0x0e, 0x8a, 0xca, 0xeb, 0xbb, 0xb2, 0x1d, 0x2b, 0x0d, 0xb4,
+ 0x09, 0x95, 0x81, 0xeb, 0x51, 0x56, 0xb9, 0xca, 0x89, 0xf5, 0x3d, 0xc2, 0x6b, 0xb6, 0x6e, 0x32,
+ 0x10, 0xc3, 0x9d, 0xce, 0x60, 0xb0, 0x40, 0xd3, 0x7f, 0xa8, 0xc1, 0xa3, 0x19, 0xfe, 0x4b, 0xd2,
+ 0xd0, 0x85, 0x09, 0x4b, 0x08, 0x65, 0x7a, 0x79, 0xa1, 0x58, 0xb7, 0x19, 0x53, 0x11, 0xa6, 0xb6,
+ 0x20, 0x85, 0x05, 0xd0, 0xfa, 0xaf, 0x34, 0xb8, 0x98, 0xf2, 0x97, 0xa7, 0x68, 0x16, 0xcf, 0x92,
+ 0x6d, 0xab, 0x14, 0xcd, 0xc2, 0x92, 0x4b, 0xd0, 0x1b, 0x50, 0xe5, 0x6f, 0x44, 0xa6, 0x6b, 0xcb,
+ 0x09, 0xac, 0x07, 0x13, 0xd8, 0x92, 0xed, 0x0f, 0x0e, 0x96, 0xaf, 0x64, 0x9c, 0xb5, 0x03, 0x31,
+ 0x56, 0x00, 0x68, 0x19, 0x2a, 0xc4, 0xf3, 0x5c, 0x4f, 0x26, 0xfb, 0x49, 0x36, 0x53, 0x77, 0x58,
+ 0x03, 0x16, 0xed, 0xfa, 0xaf, 0xc3, 0x20, 0x65, 0xd9, 0x97, 0xf9, 0xc7, 0x16, 0x27, 0x99, 0x18,
+ 0xd9, 0xd2, 0x61, 0x2e, 0x41, 0x43, 0xb8, 0x60, 0x25, 0xd2, 0xb5, 0xdc, 0x9d, 0xf5, 0x62, 0xd3,
+ 0xa8, 0xcc, 0x1a, 0x0b, 0x12, 0xfe, 0x42, 0x52, 0x82, 0x53, 0x5d, 0xe8, 0x04, 0x52, 0x5a, 0xe8,
+ 0x4d, 0x18, 0xdb, 0xa1, 0x74, 0x90, 0x71, 0xd9, 0x7f, 0x4c, 0x91, 0x08, 0x5d, 0xa8, 0xf2, 0xd1,
+ 0x75, 0x3a, 0x2d, 0xcc, 0xa1, 0xf4, 0xdf, 0x97, 0xd4, 0x7c, 0xf0, 0x13, 0xd2, 0x37, 0xd5, 0x68,
+ 0x57, 0x6c, 0xc3, 0xf7, 0x79, 0x0a, 0x13, 0xa7, 0xf9, 0xb9, 0x88, 0xe3, 0x4a, 0x86, 0x53, 0xda,
+ 0xa8, 0x13, 0x16, 0x4f, 0xed, 0x24, 0xc5, 0x73, 0x2a, 0xab, 0x70, 0xa2, 0xbb, 0x50, 0xa6, 0x76,
+ 0xd1, 0x53, 0xb9, 0x44, 0xec, 0xac, 0xb5, 0x1b, 0x53, 0x72, 0xca, 0xcb, 0x9d, 0xb5, 0x36, 0x66,
+ 0x10, 0x68, 0x03, 0x2a, 0xde, 0xd0, 0x26, 0xac, 0x0e, 0x94, 0x8b, 0xd7, 0x15, 0x36, 0x83, 0xe1,
+ 0xe6, 0x63, 0xbf, 0x7c, 0x2c, 0x70, 0xf4, 0x1f, 0x69, 0x30, 0x1d, 0xab, 0x16, 0xc8, 0x83, 0xf3,
+ 0x76, 0x64, 0xef, 0xc8, 0x79, 0x78, 0x7e, 0xf4, 0x5d, 0x27, 0x37, 0xfd, 0x9c, 0xec, 0xf7, 0x7c,
+ 0x54, 0x86, 0x63, 0x7d, 0xe8, 0x06, 0x40, 0x38, 0x6c, 0xb6, 0x0f, 0x58, 0xf0, 0x8a, 0x0d, 0x2f,
+ 0xf7, 0x01, 0x8b, 0x69, 0x1f, 0x8b, 0x76, 0x74, 0x03, 0xc0, 0x27, 0xa6, 0x47, 0x68, 0x33, 0x4c,
+ 0x5c, 0xaa, 0x1c, 0xb7, 0x95, 0x04, 0x47, 0xb4, 0xf4, 0x3f, 0x69, 0x30, 0xdd, 0x24, 0xf4, 0xfb,
+ 0xae, 0xb7, 0xdb, 0x72, 0x6d, 0xcb, 0xdc, 0x3f, 0x03, 0x12, 0x80, 0x63, 0x24, 0xe0, 0xb8, 0x7c,
+ 0x19, 0xf3, 0x2e, 0x8f, 0x0a, 0xe8, 0x1f, 0x69, 0x30, 0x1f, 0xd3, 0xbc, 0x13, 0xe6, 0x03, 0x95,
+ 0xa0, 0xb5, 0x42, 0x09, 0x3a, 0x06, 0xc3, 0x92, 0x5a, 0x76, 0x82, 0x46, 0x6b, 0x50, 0xa2, 0xae,
+ 0x8c, 0xde, 0xd1, 0x30, 0x09, 0xf1, 0xc2, 0x9a, 0xd3, 0x71, 0x71, 0x89, 0xba, 0x6c, 0x21, 0x16,
+ 0x62, 0x5a, 0xd1, 0x8c, 0xf6, 0x90, 0x46, 0x80, 0x61, 0x6c, 0xdb, 0x73, 0xfb, 0x27, 0x1e, 0x83,
+ 0x5a, 0x88, 0x57, 0x3d, 0xb7, 0x8f, 0x39, 0x96, 0xfe, 0xb1, 0x06, 0x17, 0x63, 0x9a, 0x67, 0xc0,
+ 0x1b, 0xde, 0x8c, 0xf3, 0x86, 0x6b, 0xa3, 0x0c, 0x24, 0x87, 0x3d, 0x7c, 0x5c, 0x4a, 0x0c, 0x83,
+ 0x0d, 0x18, 0x6d, 0xc3, 0xd4, 0xc0, 0xed, 0xb6, 0x4f, 0xe1, 0x81, 0x76, 0x96, 0xf1, 0xb9, 0x56,
+ 0x88, 0x85, 0xa3, 0xc0, 0xe8, 0x1e, 0x5c, 0x64, 0xd4, 0xc2, 0x1f, 0x18, 0x26, 0x69, 0x9f, 0xc2,
+ 0x95, 0xd5, 0x23, 0xfc, 0x05, 0x28, 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0x06, 0xfc,
+ 0x7c, 0x21, 0x89, 0xe4, 0xb1, 0x24, 0x4c, 0x9c, 0x46, 0x44, 0x8a, 0x97, 0x3f, 0x70, 0x80, 0xa1,
+ 0xff, 0x35, 0x19, 0x0d, 0x9c, 0xae, 0xbe, 0x16, 0xa1, 0x07, 0xf2, 0xad, 0xe6, 0x64, 0xd4, 0xa0,
+ 0x29, 0x99, 0xc8, 0x49, 0x99, 0x75, 0x35, 0xc1, 0x5b, 0xbe, 0x02, 0x13, 0xc4, 0xe9, 0x72, 0xb2,
+ 0x2e, 0x2e, 0x42, 0xf8, 0xa8, 0xee, 0x88, 0x26, 0x1c, 0xc8, 0xf4, 0x1f, 0x97, 0x13, 0xa3, 0xe2,
+ 0x65, 0xf6, 0xbd, 0x53, 0x0b, 0x0e, 0x45, 0xf8, 0x73, 0x03, 0x64, 0x2b, 0xa4, 0x7f, 0x22, 0xe6,
+ 0xbf, 0x31, 0x4a, 0xcc, 0x47, 0xeb, 0x5f, 0x2e, 0xf9, 0x43, 0xdf, 0x81, 0x71, 0x22, 0xba, 0x10,
+ 0x55, 0xf5, 0xe6, 0x28, 0x5d, 0x84, 0xe9, 0x37, 0x3c, 0x67, 0xc9, 0x36, 0x89, 0x8a, 0x5e, 0x61,
+ 0xf3, 0xc5, 0x74, 0xd9, 0xb1, 0x44, 0xb0, 0xe7, 0xc9, 0xc6, 0x63, 0x62, 0xd8, 0xaa, 0xf9, 0xc1,
+ 0xc1, 0x32, 0x84, 0x3f, 0x71, 0xd4, 0x82, 0xbf, 0x9e, 0xc9, 0x3b, 0x9b, 0xb3, 0xf9, 0x02, 0x69,
+ 0xb4, 0xd7, 0xb3, 0xd0, 0xb5, 0x53, 0x7b, 0x3d, 0x8b, 0x40, 0x1e, 0x7d, 0x86, 0xfd, 0x67, 0x09,
+ 0x2e, 0x85, 0xca, 0x85, 0x5f, 0xcf, 0x32, 0x4c, 0xfe, 0xff, 0x15, 0x52, 0xb1, 0x17, 0xad, 0x70,
+ 0xea, 0xfe, 0xfb, 0x5e, 0xb4, 0x42, 0xdf, 0x72, 0xaa, 0xdd, 0x6f, 0x4a, 0xd1, 0x01, 0x8c, 0xf8,
+ 0xac, 0x72, 0x0a, 0x1f, 0xe2, 0x7c, 0xe1, 0x5e, 0x66, 0xf4, 0xbf, 0x94, 0xe1, 0x42, 0x72, 0x37,
+ 0xc6, 0x6e, 0xdf, 0xb5, 0x63, 0x6f, 0xdf, 0x5b, 0x30, 0xb7, 0x3d, 0xb4, 0xed, 0x7d, 0x3e, 0x86,
+ 0xc8, 0x15, 0xbc, 0xb8, 0xb7, 0xff, 0x92, 0xb4, 0x9c, 0x7b, 0x35, 0x43, 0x07, 0x67, 0x5a, 0xa6,
+ 0x2f, 0xe3, 0xc7, 0xfe, 0xd3, 0xcb, 0xf8, 0xca, 0x09, 0x2e, 0xe3, 0xb3, 0xdf, 0x33, 0xca, 0x27,
+ 0x7a, 0xcf, 0x38, 0xc9, 0x4d, 0x7c, 0x46, 0x12, 0x3b, 0xf6, 0xab, 0x92, 0x97, 0x61, 0x26, 0xfe,
+ 0x3a, 0x24, 0xd6, 0x52, 0x3c, 0x50, 0xc9, 0xb7, 0x98, 0xc8, 0x5a, 0x8a, 0x76, 0xac, 0x34, 0xf4,
+ 0x43, 0x0d, 0x2e, 0x67, 0x7f, 0x05, 0x82, 0x6c, 0x98, 0xe9, 0x1b, 0xf7, 0xa2, 0x5f, 0xe6, 0x68,
+ 0x27, 0x64, 0x2b, 0xfc, 0x59, 0x60, 0x3d, 0x86, 0x85, 0x13, 0xd8, 0xe8, 0x1d, 0xa8, 0xf6, 0x8d,
+ 0x7b, 0xed, 0xa1, 0xd7, 0x23, 0x27, 0x66, 0x45, 0x7c, 0x1b, 0xad, 0x4b, 0x14, 0xac, 0xf0, 0xf4,
+ 0xcf, 0x35, 0x98, 0xcf, 0xb9, 0xec, 0xff, 0x1f, 0x1a, 0xe5, 0xfb, 0x25, 0xa8, 0xb4, 0x4d, 0xc3,
+ 0x26, 0x67, 0x40, 0x28, 0x5e, 0x8f, 0x11, 0x8a, 0xe3, 0xbe, 0x26, 0xe5, 0x5e, 0xe5, 0x72, 0x09,
+ 0x9c, 0xe0, 0x12, 0x4f, 0x15, 0x42, 0x3b, 0x9a, 0x46, 0xbc, 0x00, 0x93, 0xaa, 0xd3, 0xd1, 0xb2,
+ 0x9b, 0xfe, 0xcb, 0x12, 0x4c, 0x45, 0xba, 0x18, 0x31, 0x37, 0x6e, 0xc7, 0x0a, 0x42, 0xb9, 0xc0,
+ 0x4d, 0x4b, 0xa4, 0xaf, 0x5a, 0x50, 0x02, 0xc4, 0xd7, 0x10, 0xe1, 0xfb, 0x77, 0xba, 0x32, 0xbc,
+ 0x0c, 0x33, 0xd4, 0xf0, 0x7a, 0x84, 0x2a, 0xda, 0x2e, 0x2e, 0x19, 0xd5, 0x67, 0x39, 0x9d, 0x98,
+ 0x14, 0x27, 0xb4, 0x17, 0x5f, 0x82, 0xe9, 0x58, 0x67, 0xa3, 0x7c, 0xcc, 0xd0, 0x58, 0xb9, 0xff,
+ 0xd9, 0xd2, 0xb9, 0x4f, 0x3e, 0x5b, 0x3a, 0xf7, 0xe9, 0x67, 0x4b, 0xe7, 0x7e, 0x70, 0xb8, 0xa4,
+ 0xdd, 0x3f, 0x5c, 0xd2, 0x3e, 0x39, 0x5c, 0xd2, 0x3e, 0x3d, 0x5c, 0xd2, 0xfe, 0x7e, 0xb8, 0xa4,
+ 0xfd, 0xf4, 0xf3, 0xa5, 0x73, 0xef, 0x3c, 0x76, 0xe4, 0xff, 0x6d, 0xf8, 0x77, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xf3, 0x1c, 0xa0, 0x16, 0x14, 0x31, 0x00, 0x00,
}
func (m *DaemonSet) Marshal() (dAtA []byte, err error) {
@@ -2944,16 +2913,6 @@ func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -3302,43 +3261,6 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *NetworkPolicyStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *NetworkPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
func (m *ReplicaSet) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -4362,8 +4284,6 @@ func (m *NetworkPolicy) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -4496,21 +4416,6 @@ func (m *NetworkPolicySpec) Size() (n int) {
return n
}
-func (m *NetworkPolicyStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
func (m *ReplicaSet) Size() (n int) {
if m == nil {
return 0
@@ -5098,7 +5003,6 @@ func (this *NetworkPolicy) String() string {
s := strings.Join([]string{`&NetworkPolicy{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@@ -5208,21 +5112,6 @@ func (this *NetworkPolicySpec) String() string {
}, "")
return s
}
-func (this *NetworkPolicyStatus) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForConditions := "[]Condition{"
- for _, f := range this.Conditions {
- repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
- }
- repeatedStringForConditions += "}"
- s := strings.Join([]string{`&NetworkPolicyStatus{`,
- `Conditions:` + repeatedStringForConditions + `,`,
- `}`,
- }, "")
- return s
-}
func (this *ReplicaSet) String() string {
if this == nil {
return "nil"
@@ -9627,39 +9516,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -10514,90 +10370,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Conditions = append(m.Conditions, v1.Condition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func (m *ReplicaSet) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
index 3ab6a093b5..3f2549681e 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
@@ -646,11 +646,6 @@ message NetworkPolicy {
// Specification of the desired behavior for this NetworkPolicy.
// +optional
optional NetworkPolicySpec spec = 2;
-
- // Status is the current state of the NetworkPolicy.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- optional NetworkPolicyStatus status = 3;
}
// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule.
@@ -798,18 +793,6 @@ message NetworkPolicySpec {
repeated string policyTypes = 4;
}
-// NetworkPolicyStatus describe the current state of the NetworkPolicy.
-message NetworkPolicyStatus {
- // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy.
- // Current service state
- // +optional
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
-}
-
// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for
// more information.
// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
index c0ac6fa25d..70b349f654 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -1041,10 +1041,10 @@ type NetworkPolicy struct {
// +optional
Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
- // Status is the current state of the NetworkPolicy.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+ // Status is tombstoned to show why 3 is a reserved protobuf tag.
+ // This commented field should remain, so in the future if we decide to reimplement
+ // NetworkPolicyStatus a different protobuf name and tag SHOULD be used!
+ // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType.
@@ -1207,48 +1207,6 @@ type NetworkPolicyPeer struct {
IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"`
}
-// NetworkPolicyConditionType is the type for status conditions on
-// a NetworkPolicy. This type should be used with the
-// NetworkPolicyStatus.Conditions field.
-type NetworkPolicyConditionType string
-
-const (
- // NetworkPolicyConditionStatusAccepted represents status of a Network Policy that could be properly parsed by
- // the Network Policy provider and will be implemented in the cluster
- NetworkPolicyConditionStatusAccepted NetworkPolicyConditionType = "Accepted"
-
- // NetworkPolicyConditionStatusPartialFailure represents status of a Network Policy that could be partially
- // parsed by the Network Policy provider and may not be completely implemented due to a lack of a feature or some
- // other condition
- NetworkPolicyConditionStatusPartialFailure NetworkPolicyConditionType = "PartialFailure"
-
- // NetworkPolicyConditionStatusFailure represents status of a Network Policy that could not be parsed by the
- // Network Policy provider and will not be implemented in the cluster
- NetworkPolicyConditionStatusFailure NetworkPolicyConditionType = "Failure"
-)
-
-// NetworkPolicyConditionReason defines the set of reasons that explain why a
-// particular NetworkPolicy condition type has been raised.
-type NetworkPolicyConditionReason string
-
-const (
- // NetworkPolicyConditionReasonFeatureNotSupported represents a reason where the Network Policy may not have been
- // implemented in the cluster due to a lack of some feature not supported by the Network Policy provider
- NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported"
-)
-
-// NetworkPolicyStatus describe the current state of the NetworkPolicy.
-type NetworkPolicyStatus struct {
- // Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy.
- // Current service state
- // +optional
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-}
-
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.3
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index 39aaf48537..408022c9d8 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -338,7 +338,6 @@ var map_NetworkPolicy = map[string]string{
"": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "Specification of the desired behavior for this NetworkPolicy.",
- "status": "Status is the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
}
func (NetworkPolicy) SwaggerDoc() map[string]string {
@@ -409,15 +408,6 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string {
return map_NetworkPolicySpec
}
-var map_NetworkPolicyStatus = map[string]string{
- "": "NetworkPolicyStatus describe the current state of the NetworkPolicy.",
- "conditions": "Conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state",
-}
-
-func (NetworkPolicyStatus) SwaggerDoc() map[string]string {
- return map_NetworkPolicyStatus
-}
-
var map_ReplicaSet = map[string]string{
"": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
"metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
index b6e9272992..6b474ae483 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
@@ -725,7 +725,6 @@ func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
return
}
@@ -938,29 +937,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkPolicyStatus) DeepCopyInto(out *NetworkPolicyStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]v1.Condition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyStatus.
-func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus {
- if in == nil {
- return nil
- }
- out := new(NetworkPolicyStatus)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) {
*out = *in
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go
index cf5fc5600b..b54e1ceefb 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go
@@ -43,10 +43,38 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
+func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
+func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_45ba024d525b289b, []int{0}
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src)
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
+
func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
func (*FlowDistinguisherMethod) ProtoMessage() {}
func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{0}
+ return fileDescriptor_45ba024d525b289b, []int{1}
}
func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
func (m *FlowSchema) Reset() { *m = FlowSchema{} }
func (*FlowSchema) ProtoMessage() {}
func (*FlowSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{1}
+ return fileDescriptor_45ba024d525b289b, []int{2}
}
func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
func (*FlowSchemaCondition) ProtoMessage() {}
func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{2}
+ return fileDescriptor_45ba024d525b289b, []int{3}
}
func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
func (*FlowSchemaList) ProtoMessage() {}
func (*FlowSchemaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{3}
+ return fileDescriptor_45ba024d525b289b, []int{4}
}
func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
func (*FlowSchemaSpec) ProtoMessage() {}
func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{4}
+ return fileDescriptor_45ba024d525b289b, []int{5}
}
func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
func (*FlowSchemaStatus) ProtoMessage() {}
func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{5}
+ return fileDescriptor_45ba024d525b289b, []int{6}
}
func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
func (m *GroupSubject) Reset() { *m = GroupSubject{} }
func (*GroupSubject) ProtoMessage() {}
func (*GroupSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{6}
+ return fileDescriptor_45ba024d525b289b, []int{7}
}
func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
func (m *LimitResponse) Reset() { *m = LimitResponse{} }
func (*LimitResponse) ProtoMessage() {}
func (*LimitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{7}
+ return fileDescriptor_45ba024d525b289b, []int{8}
}
func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{8}
+ return fileDescriptor_45ba024d525b289b, []int{9}
}
func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
func (*NonResourcePolicyRule) ProtoMessage() {}
func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{9}
+ return fileDescriptor_45ba024d525b289b, []int{10}
}
func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
func (*PolicyRulesWithSubjects) ProtoMessage() {}
func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{10}
+ return fileDescriptor_45ba024d525b289b, []int{11}
}
func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
func (*PriorityLevelConfiguration) ProtoMessage() {}
func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{11}
+ return fileDescriptor_45ba024d525b289b, []int{12}
}
func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{12}
+ return fileDescriptor_45ba024d525b289b, []int{13}
}
func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf
func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
func (*PriorityLevelConfigurationList) ProtoMessage() {}
func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{13}
+ return fileDescriptor_45ba024d525b289b, []int{14}
}
func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
func (*PriorityLevelConfigurationReference) ProtoMessage() {}
func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{14}
+ return fileDescriptor_45ba024d525b289b, []int{15}
}
func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf
func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{15}
+ return fileDescriptor_45ba024d525b289b, []int{16}
}
func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{16}
+ return fileDescriptor_45ba024d525b289b, []int{17}
}
func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
func (*QueuingConfiguration) ProtoMessage() {}
func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{17}
+ return fileDescriptor_45ba024d525b289b, []int{18}
}
func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
func (*ResourcePolicyRule) ProtoMessage() {}
func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{18}
+ return fileDescriptor_45ba024d525b289b, []int{19}
}
func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
func (*ServiceAccountSubject) ProtoMessage() {}
func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{19}
+ return fileDescriptor_45ba024d525b289b, []int{20}
}
func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
func (m *Subject) Reset() { *m = Subject{} }
func (*Subject) ProtoMessage() {}
func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{20}
+ return fileDescriptor_45ba024d525b289b, []int{21}
}
func (m *Subject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo
func (m *UserSubject) Reset() { *m = UserSubject{} }
func (*UserSubject) ProtoMessage() {}
func (*UserSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_45ba024d525b289b, []int{21}
+ return fileDescriptor_45ba024d525b289b, []int{22}
}
func (m *UserSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() {
var xxx_messageInfo_UserSubject proto.InternalMessageInfo
func init() {
+ proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration")
proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod")
proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema")
proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition")
@@ -689,105 +718,142 @@ func init() {
}
var fileDescriptor_45ba024d525b289b = []byte{
- // 1554 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0x13, 0xc7,
- 0x1b, 0xcf, 0x3a, 0x76, 0x12, 0x4f, 0x5e, 0x99, 0x10, 0xc5, 0xff, 0x20, 0xd9, 0x61, 0xff, 0x52,
- 0xa1, 0x05, 0x76, 0x09, 0x05, 0x4a, 0x85, 0x2a, 0x94, 0x0d, 0x94, 0xb7, 0x24, 0x24, 0x13, 0xa0,
- 0x2a, 0xa2, 0x12, 0x9b, 0xf5, 0xc4, 0x1e, 0x62, 0xef, 0x6e, 0x67, 0x76, 0x9d, 0xa6, 0xe2, 0x50,
- 0xa9, 0x5f, 0xa0, 0x1f, 0x80, 0x63, 0x0f, 0x3d, 0xf7, 0x13, 0xf4, 0x18, 0x55, 0x3d, 0x70, 0xe4,
- 0x64, 0x11, 0xf7, 0xda, 0x0f, 0xd0, 0x72, 0xa8, 0xaa, 0x99, 0x9d, 0xdd, 0xf5, 0xfa, 0x25, 0x6b,
- 0x1a, 0x89, 0x53, 0x6f, 0xd9, 0xe7, 0xe5, 0xf7, 0xbc, 0xcc, 0xf3, 0xe6, 0x80, 0x3b, 0xbb, 0xd7,
- 0x98, 0x46, 0x1c, 0x7d, 0xd7, 0xdf, 0xc6, 0xd4, 0xc6, 0x1e, 0x66, 0x7a, 0x03, 0xdb, 0x65, 0x87,
- 0xea, 0x92, 0x61, 0xba, 0x44, 0xdf, 0xa9, 0x39, 0x7b, 0x96, 0x63, 0x7b, 0xd4, 0xa9, 0xe9, 0x8d,
- 0x25, 0xb3, 0xe6, 0x56, 0xcd, 0x25, 0xbd, 0x82, 0x6d, 0x4c, 0x4d, 0x0f, 0x97, 0x35, 0x97, 0x3a,
- 0x9e, 0x03, 0x4b, 0x81, 0x82, 0x66, 0xba, 0x44, 0x6b, 0x53, 0xd0, 0x42, 0x85, 0x85, 0x0b, 0x15,
- 0xe2, 0x55, 0xfd, 0x6d, 0xcd, 0x72, 0xea, 0x7a, 0xc5, 0xa9, 0x38, 0xba, 0xd0, 0xdb, 0xf6, 0x77,
- 0xc4, 0x97, 0xf8, 0x10, 0x7f, 0x05, 0x78, 0x0b, 0x97, 0x63, 0x07, 0xea, 0xa6, 0x55, 0x25, 0x36,
- 0xa6, 0xfb, 0xba, 0xbb, 0x5b, 0xe1, 0x04, 0xa6, 0xd7, 0xb1, 0x67, 0xea, 0x8d, 0x2e, 0x2f, 0x16,
- 0xf4, 0x7e, 0x5a, 0xd4, 0xb7, 0x3d, 0x52, 0xc7, 0x5d, 0x0a, 0x57, 0xd3, 0x14, 0x98, 0x55, 0xc5,
- 0x75, 0xb3, 0x53, 0x4f, 0x7d, 0x02, 0xe6, 0x3f, 0xaf, 0x39, 0x7b, 0x37, 0x09, 0xf3, 0x88, 0x5d,
- 0xf1, 0x09, 0xab, 0x62, 0xba, 0x86, 0xbd, 0xaa, 0x53, 0x86, 0x37, 0x40, 0xd6, 0xdb, 0x77, 0x71,
- 0x41, 0x59, 0x54, 0xce, 0xe6, 0x8d, 0x73, 0x07, 0xcd, 0xd2, 0x50, 0xab, 0x59, 0xca, 0x3e, 0xdc,
- 0x77, 0xf1, 0xdb, 0x66, 0xe9, 0x54, 0x1f, 0x35, 0xce, 0x46, 0x42, 0x51, 0x7d, 0x99, 0x01, 0x80,
- 0x4b, 0x6d, 0x09, 0xd3, 0xf0, 0x19, 0x18, 0xe3, 0xe1, 0x96, 0x4d, 0xcf, 0x14, 0x98, 0xe3, 0x97,
- 0x2e, 0x6a, 0x71, 0xb2, 0x23, 0xaf, 0x35, 0x77, 0xb7, 0xc2, 0x09, 0x4c, 0xe3, 0xd2, 0x5a, 0x63,
- 0x49, 0x7b, 0xb0, 0xfd, 0x1c, 0x5b, 0xde, 0x1a, 0xf6, 0x4c, 0x03, 0x4a, 0x2f, 0x40, 0x4c, 0x43,
- 0x11, 0x2a, 0xdc, 0x04, 0x59, 0xe6, 0x62, 0xab, 0x90, 0x11, 0xe8, 0xba, 0x96, 0xf2, 0x94, 0x5a,
- 0xec, 0xdc, 0x96, 0x8b, 0x2d, 0x63, 0x22, 0x0c, 0x91, 0x7f, 0x21, 0x01, 0x05, 0xbf, 0x04, 0x23,
- 0xcc, 0x33, 0x3d, 0x9f, 0x15, 0x86, 0x05, 0xe8, 0xd2, 0xbb, 0x80, 0x0a, 0x45, 0x63, 0x4a, 0xc2,
- 0x8e, 0x04, 0xdf, 0x48, 0x02, 0xaa, 0xaf, 0x33, 0x60, 0x36, 0x16, 0x5e, 0x71, 0xec, 0x32, 0xf1,
- 0x88, 0x63, 0xc3, 0xeb, 0x89, 0xbc, 0x9f, 0xe9, 0xc8, 0xfb, 0x7c, 0x0f, 0x95, 0x38, 0xe7, 0xf0,
- 0xd3, 0xc8, 0xdf, 0x8c, 0x50, 0x3f, 0x9d, 0x34, 0xfe, 0xb6, 0x59, 0x9a, 0x8e, 0xd4, 0x92, 0xfe,
- 0xc0, 0x06, 0x80, 0x35, 0x93, 0x79, 0x0f, 0xa9, 0x69, 0xb3, 0x00, 0x96, 0xd4, 0xb1, 0x0c, 0xfb,
- 0xa3, 0xc1, 0x5e, 0x8a, 0x6b, 0x18, 0x0b, 0xd2, 0x24, 0x5c, 0xed, 0x42, 0x43, 0x3d, 0x2c, 0xc0,
- 0x0f, 0xc0, 0x08, 0xc5, 0x26, 0x73, 0xec, 0x42, 0x56, 0xb8, 0x1c, 0xe5, 0x0b, 0x09, 0x2a, 0x92,
- 0x5c, 0xf8, 0x21, 0x18, 0xad, 0x63, 0xc6, 0xcc, 0x0a, 0x2e, 0xe4, 0x84, 0xe0, 0xb4, 0x14, 0x1c,
- 0x5d, 0x0b, 0xc8, 0x28, 0xe4, 0xab, 0xbf, 0x28, 0x60, 0x2a, 0xce, 0xd3, 0x2a, 0x61, 0x1e, 0x7c,
- 0xda, 0x55, 0x7d, 0xda, 0x60, 0x31, 0x71, 0x6d, 0x51, 0x7b, 0x33, 0xd2, 0xdc, 0x58, 0x48, 0x69,
- 0xab, 0xbc, 0x0d, 0x90, 0x23, 0x1e, 0xae, 0xf3, 0xac, 0x0f, 0x9f, 0x1d, 0xbf, 0x74, 0xee, 0x1d,
- 0xaa, 0xc4, 0x98, 0x94, 0xb8, 0xb9, 0xbb, 0x1c, 0x01, 0x05, 0x40, 0xea, 0x1f, 0xc3, 0xed, 0x21,
- 0xf0, 0x8a, 0x84, 0x3f, 0x29, 0x60, 0xc1, 0xa5, 0xc4, 0xa1, 0xc4, 0xdb, 0x5f, 0xc5, 0x0d, 0x5c,
- 0x5b, 0x71, 0xec, 0x1d, 0x52, 0xf1, 0xa9, 0xc9, 0x73, 0x29, 0xa3, 0xba, 0x99, 0x6a, 0x7a, 0xa3,
- 0x2f, 0x04, 0xc2, 0x3b, 0x98, 0x62, 0xdb, 0xc2, 0x86, 0x2a, 0x7d, 0x5a, 0x38, 0x42, 0xf8, 0x08,
- 0x5f, 0xe0, 0x3d, 0x00, 0xeb, 0xa6, 0xc7, 0x73, 0x5a, 0xd9, 0xa0, 0xd8, 0xc2, 0x65, 0x8e, 0x2a,
- 0x4a, 0x32, 0x17, 0xd7, 0xc7, 0x5a, 0x97, 0x04, 0xea, 0xa1, 0x05, 0xbf, 0x57, 0xc0, 0x6c, 0xb9,
- 0x7b, 0xd0, 0xc8, 0xca, 0xbc, 0x36, 0x50, 0xaa, 0x7b, 0x0c, 0x2a, 0x63, 0xbe, 0xd5, 0x2c, 0xcd,
- 0xf6, 0x60, 0xa0, 0x5e, 0xd6, 0xe0, 0x57, 0x20, 0x47, 0xfd, 0x1a, 0x66, 0x85, 0xac, 0x78, 0xe1,
- 0x74, 0xb3, 0x1b, 0x4e, 0x8d, 0x58, 0xfb, 0x88, 0xeb, 0x7c, 0x41, 0xbc, 0xea, 0x96, 0x2f, 0x26,
- 0x16, 0x8b, 0x9f, 0x5b, 0xb0, 0x50, 0x80, 0xaa, 0xbe, 0x00, 0x33, 0x9d, 0x83, 0x03, 0x56, 0x01,
- 0xb0, 0xc2, 0x5e, 0x65, 0x05, 0x45, 0xd8, 0xbd, 0xfc, 0x0e, 0x95, 0x15, 0x35, 0x7a, 0x3c, 0x36,
- 0x23, 0x12, 0x43, 0x6d, 0xd8, 0xea, 0x45, 0x30, 0x71, 0x9b, 0x3a, 0xbe, 0x2b, 0x9d, 0x84, 0x8b,
- 0x20, 0x6b, 0x9b, 0xf5, 0x70, 0x04, 0x45, 0x73, 0x71, 0xdd, 0xac, 0x63, 0x24, 0x38, 0xea, 0x8f,
- 0x0a, 0x98, 0x5c, 0x25, 0x75, 0xe2, 0x21, 0xcc, 0x5c, 0xc7, 0x66, 0x18, 0x5e, 0x49, 0x8c, 0xad,
- 0xd3, 0x1d, 0x63, 0xeb, 0x44, 0x42, 0xb8, 0x6d, 0x60, 0x3d, 0x05, 0xa3, 0x5f, 0xfb, 0xd8, 0x27,
- 0x76, 0x45, 0x8e, 0xed, 0x2b, 0xa9, 0x11, 0x6e, 0x06, 0xf2, 0x89, 0x8a, 0x33, 0xc6, 0xf9, 0x20,
- 0x90, 0x1c, 0x14, 0x42, 0xaa, 0x7f, 0x67, 0xc0, 0x69, 0x61, 0x19, 0x97, 0xfb, 0x57, 0x32, 0x7c,
- 0x0a, 0x0a, 0x26, 0x63, 0x3e, 0xc5, 0xe5, 0x15, 0xc7, 0xb6, 0x7c, 0xca, 0x7b, 0x60, 0x7f, 0xab,
- 0x6a, 0x52, 0xcc, 0x44, 0x38, 0x39, 0x63, 0x51, 0x86, 0x53, 0x58, 0xee, 0x23, 0x87, 0xfa, 0x22,
- 0xc0, 0x5d, 0x30, 0x59, 0x6b, 0x0f, 0x5e, 0xc6, 0xa9, 0xa5, 0xc6, 0x99, 0x48, 0x99, 0x31, 0x27,
- 0x5d, 0x48, 0xa6, 0x1d, 0x25, 0xb1, 0xe1, 0x67, 0x60, 0xba, 0x86, 0xed, 0xb2, 0xb9, 0x5d, 0xc3,
- 0x1b, 0x98, 0x5a, 0xd8, 0xf6, 0x44, 0x9f, 0xe4, 0x8c, 0xd9, 0x56, 0xb3, 0x34, 0xbd, 0x9a, 0x64,
- 0xa1, 0x4e, 0x59, 0xf8, 0x00, 0xcc, 0x6d, 0x3b, 0x94, 0x3a, 0x7b, 0xc4, 0xae, 0x08, 0x3b, 0x21,
- 0x48, 0x56, 0x80, 0xfc, 0xaf, 0xd5, 0x2c, 0xcd, 0x19, 0xbd, 0x04, 0x50, 0x6f, 0x3d, 0x75, 0x0f,
- 0xcc, 0xad, 0xf3, 0xc1, 0xc2, 0x1c, 0x9f, 0x5a, 0x38, 0xee, 0x09, 0x58, 0x02, 0xb9, 0x06, 0xa6,
- 0xdb, 0x41, 0x5d, 0xe7, 0x8d, 0x3c, 0xef, 0x88, 0xc7, 0x9c, 0x80, 0x02, 0x3a, 0x8f, 0xc4, 0x8e,
- 0x35, 0x1f, 0xa1, 0x55, 0x56, 0x18, 0x11, 0xa2, 0x22, 0x92, 0xf5, 0x24, 0x0b, 0x75, 0xca, 0xaa,
- 0x87, 0x19, 0x30, 0xdf, 0xa7, 0x05, 0xe1, 0x63, 0x30, 0xc6, 0xe4, 0xdf, 0xb2, 0xad, 0xce, 0xa6,
- 0x3e, 0x86, 0x54, 0x8e, 0xb7, 0x40, 0x88, 0x86, 0x22, 0x2c, 0xe8, 0x82, 0x49, 0x2a, 0x7d, 0x10,
- 0x46, 0xe5, 0x36, 0xf8, 0x38, 0x15, 0xbc, 0x3b, 0x3f, 0xf1, 0x73, 0xa3, 0x76, 0x44, 0x94, 0x34,
- 0x00, 0x5f, 0x80, 0x99, 0xb6, 0xc0, 0x03, 0xa3, 0xc3, 0xc2, 0xe8, 0xd5, 0x54, 0xa3, 0x3d, 0xdf,
- 0xc5, 0x28, 0x48, 0xbb, 0x33, 0xeb, 0x1d, 0xb8, 0xa8, 0xcb, 0x92, 0xfa, 0x5b, 0x06, 0x1c, 0xb1,
- 0x20, 0xde, 0xc3, 0xc1, 0x67, 0x26, 0x0e, 0xbe, 0x1b, 0xc7, 0x58, 0x7d, 0x7d, 0x0f, 0x40, 0xd2,
- 0x71, 0x00, 0x2e, 0x1f, 0xc7, 0xc8, 0xd1, 0x07, 0xe1, 0x9f, 0x19, 0xf0, 0xff, 0xfe, 0xca, 0xf1,
- 0x81, 0x78, 0x3f, 0x31, 0x69, 0x3f, 0xe9, 0x98, 0xb4, 0x67, 0x06, 0x80, 0xf8, 0xef, 0x60, 0xec,
- 0x38, 0x18, 0xdf, 0x28, 0xa0, 0xd8, 0x3f, 0x6f, 0xef, 0xe1, 0x80, 0x7c, 0x96, 0x3c, 0x20, 0xaf,
- 0x1f, 0xa3, 0xca, 0xfa, 0x1c, 0x94, 0xb7, 0x8f, 0x2a, 0xae, 0xe8, 0xf2, 0x1b, 0x60, 0xf5, 0x1f,
- 0x1c, 0x99, 0x2b, 0x71, 0xa9, 0xa6, 0xfc, 0x84, 0x49, 0x68, 0xdf, 0xb2, 0xf9, 0x02, 0xaa, 0xf3,
- 0x1d, 0x12, 0x54, 0x24, 0x01, 0xa3, 0xb5, 0x60, 0x65, 0xcb, 0xbe, 0x36, 0x06, 0xdb, 0x94, 0x47,
- 0xad, 0xf8, 0xe0, 0x3c, 0x90, 0x62, 0x28, 0xc4, 0x57, 0x5f, 0x2a, 0x60, 0x31, 0xad, 0x5d, 0xe1,
- 0x37, 0x3d, 0xce, 0xb0, 0xe3, 0x5c, 0xd9, 0x83, 0x9f, 0x65, 0x3f, 0x2b, 0xe0, 0x64, 0xaf, 0x63,
- 0x87, 0x77, 0x00, 0xbf, 0x70, 0xa2, 0xf3, 0x24, 0xea, 0x80, 0x4d, 0x41, 0x45, 0x92, 0x0b, 0xcf,
- 0x83, 0xb1, 0xaa, 0x69, 0x97, 0xb7, 0xc8, 0xb7, 0xe1, 0xf1, 0x1d, 0xd5, 0xe0, 0x1d, 0x49, 0x47,
- 0x91, 0x04, 0xbc, 0x09, 0x66, 0x84, 0xde, 0x2a, 0xb6, 0x2b, 0x5e, 0x55, 0x24, 0x4b, 0x1e, 0x0f,
- 0xd1, 0x52, 0xd8, 0xec, 0xe0, 0xa3, 0x2e, 0x0d, 0xf5, 0x2f, 0x05, 0xc0, 0x7f, 0xb3, 0xef, 0xcf,
- 0x81, 0xbc, 0xe9, 0x12, 0x71, 0x86, 0x06, 0x5d, 0x90, 0x37, 0x26, 0x5b, 0xcd, 0x52, 0x7e, 0x79,
- 0xe3, 0x6e, 0x40, 0x44, 0x31, 0x9f, 0x0b, 0x87, 0x8b, 0x30, 0x58, 0x78, 0x52, 0x38, 0x34, 0xcc,
- 0x50, 0xcc, 0x87, 0xd7, 0xc0, 0x84, 0x55, 0xf3, 0x99, 0x87, 0xe9, 0x96, 0xe5, 0xb8, 0x58, 0x4c,
- 0x8d, 0x31, 0xe3, 0xa4, 0x8c, 0x69, 0x62, 0xa5, 0x8d, 0x87, 0x12, 0x92, 0x50, 0x03, 0x80, 0x97,
- 0x3c, 0x73, 0x4d, 0x6e, 0x27, 0x27, 0xec, 0x4c, 0xf1, 0x07, 0x5b, 0x8f, 0xa8, 0xa8, 0x4d, 0x42,
- 0x7d, 0x0e, 0xe6, 0xb6, 0x30, 0x6d, 0x10, 0x0b, 0x2f, 0x5b, 0x96, 0xe3, 0xdb, 0x5e, 0x78, 0x50,
- 0xeb, 0x20, 0x1f, 0x89, 0xc9, 0xae, 0x38, 0x21, 0xed, 0xe7, 0x23, 0x2c, 0x14, 0xcb, 0x44, 0x6d,
- 0x98, 0xe9, 0xdb, 0x86, 0xbf, 0x66, 0xc0, 0x68, 0x0c, 0x9f, 0xdd, 0x25, 0x76, 0x59, 0x22, 0x9f,
- 0x0a, 0xa5, 0xef, 0x13, 0xbb, 0xfc, 0xb6, 0x59, 0x1a, 0x97, 0x62, 0xfc, 0x13, 0x09, 0x41, 0x78,
- 0x0f, 0x64, 0x7d, 0x86, 0xa9, 0x6c, 0xb0, 0xf3, 0xa9, 0xd5, 0xfc, 0x88, 0x61, 0x1a, 0x5e, 0x40,
- 0x63, 0x1c, 0x9a, 0x13, 0x90, 0xc0, 0x80, 0xeb, 0x20, 0x57, 0xe1, 0xaf, 0x22, 0x27, 0xff, 0x85,
- 0x54, 0xb0, 0xf6, 0x9f, 0x1a, 0x41, 0x21, 0x08, 0x0a, 0x0a, 0x60, 0x20, 0x05, 0x53, 0x2c, 0x91,
- 0x44, 0xf1, 0x60, 0x83, 0x5c, 0x34, 0x3d, 0x73, 0x6f, 0xc0, 0x56, 0xb3, 0x34, 0x95, 0x64, 0xa1,
- 0x0e, 0x0b, 0xaa, 0x0e, 0xc6, 0xdb, 0x42, 0x4c, 0x1f, 0x82, 0xc6, 0xad, 0x83, 0xc3, 0xe2, 0xd0,
- 0xab, 0xc3, 0xe2, 0xd0, 0xeb, 0xc3, 0xe2, 0xd0, 0x77, 0xad, 0xa2, 0x72, 0xd0, 0x2a, 0x2a, 0xaf,
- 0x5a, 0x45, 0xe5, 0x75, 0xab, 0xa8, 0xbc, 0x69, 0x15, 0x95, 0x1f, 0x7e, 0x2f, 0x0e, 0x3d, 0x29,
- 0xa5, 0xfc, 0xf7, 0xf1, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x48, 0x2e, 0x29, 0x16, 0xb8, 0x14,
- 0x00, 0x00,
+ // 1621 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0xdb, 0x46,
+ 0x1a, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x19, 0xc7, 0xb0, 0xd6, 0x59, 0x48, 0x0e, 0x17, 0xd8,
+ 0x64, 0x37, 0x09, 0x15, 0x67, 0x93, 0x6c, 0x16, 0xc1, 0x22, 0x30, 0x93, 0x6c, 0xbe, 0x6c, 0xc7,
+ 0x1e, 0x27, 0xd9, 0x36, 0x48, 0x81, 0xd0, 0xd4, 0x58, 0x9a, 0x58, 0x22, 0xd9, 0x19, 0x52, 0x8e,
+ 0x8b, 0x1c, 0x0a, 0xf4, 0x0f, 0xf4, 0x07, 0xe4, 0xd8, 0x43, 0x6f, 0x05, 0x7a, 0xed, 0xa5, 0xc7,
+ 0xa0, 0xe8, 0x21, 0xc7, 0x9c, 0x84, 0x58, 0xbd, 0xf6, 0x07, 0xb4, 0x39, 0x14, 0xc5, 0x0c, 0x87,
+ 0xa4, 0x28, 0x89, 0xa2, 0x52, 0x03, 0x39, 0xf5, 0x66, 0xbe, 0x1f, 0xcf, 0x3b, 0xf3, 0xce, 0xfb,
+ 0xf1, 0xc8, 0xe0, 0xf6, 0xde, 0x15, 0xa6, 0x11, 0xbb, 0xbc, 0xe7, 0xed, 0x60, 0x6a, 0x61, 0x17,
+ 0xb3, 0x72, 0x13, 0x5b, 0x15, 0x9b, 0x96, 0xa5, 0xc2, 0x70, 0x48, 0x79, 0xb7, 0x6e, 0xef, 0x9b,
+ 0xb6, 0xe5, 0x52, 0xbb, 0x5e, 0x6e, 0xae, 0x18, 0x75, 0xa7, 0x66, 0xac, 0x94, 0xab, 0xd8, 0xc2,
+ 0xd4, 0x70, 0x71, 0x45, 0x73, 0xa8, 0xed, 0xda, 0xb0, 0xe4, 0x3b, 0x68, 0x86, 0x43, 0xb4, 0x0e,
+ 0x07, 0x2d, 0x70, 0x58, 0x3a, 0x57, 0x25, 0x6e, 0xcd, 0xdb, 0xd1, 0x4c, 0xbb, 0x51, 0xae, 0xda,
+ 0x55, 0xbb, 0x2c, 0xfc, 0x76, 0xbc, 0x5d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x3e, 0xde, 0xd2, 0xc5,
+ 0xe8, 0x00, 0x0d, 0xc3, 0xac, 0x11, 0x0b, 0xd3, 0x83, 0xb2, 0xb3, 0x57, 0xe5, 0x02, 0x56, 0x6e,
+ 0x60, 0xd7, 0x28, 0x37, 0x7b, 0x4e, 0xb1, 0x54, 0x4e, 0xf2, 0xa2, 0x9e, 0xe5, 0x92, 0x06, 0xee,
+ 0x71, 0xb8, 0x9c, 0xe6, 0xc0, 0xcc, 0x1a, 0x6e, 0x18, 0xdd, 0x7e, 0xea, 0x77, 0x0a, 0x58, 0xbe,
+ 0xf9, 0x1c, 0x37, 0x1c, 0x77, 0x93, 0x12, 0x9b, 0x12, 0xf7, 0x60, 0x0d, 0x37, 0x71, 0xfd, 0xba,
+ 0x6d, 0xed, 0x92, 0xaa, 0x47, 0x0d, 0x97, 0xd8, 0x16, 0xfc, 0x08, 0x14, 0x2c, 0xbb, 0x41, 0x2c,
+ 0x83, 0xcb, 0x4d, 0x8f, 0x52, 0x6c, 0x99, 0x07, 0xdb, 0x35, 0x83, 0x62, 0x56, 0x50, 0x96, 0x95,
+ 0xd3, 0x39, 0xfd, 0xaf, 0xed, 0x56, 0xa9, 0xb0, 0x91, 0x60, 0x83, 0x12, 0xbd, 0xe1, 0x7f, 0xc1,
+ 0x6c, 0x1d, 0x5b, 0x15, 0x63, 0xa7, 0x8e, 0x37, 0x31, 0x35, 0xb1, 0xe5, 0x16, 0x32, 0x02, 0x70,
+ 0xbe, 0xdd, 0x2a, 0xcd, 0xae, 0xc5, 0x55, 0xa8, 0xdb, 0x56, 0x7d, 0x0c, 0x16, 0xff, 0x57, 0xb7,
+ 0xf7, 0x6f, 0x10, 0xe6, 0x12, 0xab, 0xea, 0x11, 0x56, 0xc3, 0x74, 0x1d, 0xbb, 0x35, 0xbb, 0x02,
+ 0xaf, 0x81, 0xac, 0x7b, 0xe0, 0x60, 0x71, 0xbe, 0xbc, 0x7e, 0xe6, 0x55, 0xab, 0x34, 0xd2, 0x6e,
+ 0x95, 0xb2, 0x0f, 0x0e, 0x1c, 0xfc, 0xae, 0x55, 0x3a, 0x91, 0xe0, 0xc6, 0xd5, 0x48, 0x38, 0xaa,
+ 0x2f, 0x33, 0x00, 0x70, 0xab, 0x6d, 0x91, 0x38, 0xf8, 0x14, 0x4c, 0xf0, 0xc7, 0xaa, 0x18, 0xae,
+ 0x21, 0x30, 0x27, 0x2f, 0x9c, 0xd7, 0xa2, 0x52, 0x09, 0x73, 0xae, 0x39, 0x7b, 0x55, 0x2e, 0x60,
+ 0x1a, 0xb7, 0xd6, 0x9a, 0x2b, 0xda, 0xfd, 0x9d, 0x67, 0xd8, 0x74, 0xd7, 0xb1, 0x6b, 0xe8, 0x50,
+ 0x9e, 0x02, 0x44, 0x32, 0x14, 0xa2, 0xc2, 0x2d, 0x90, 0x65, 0x0e, 0x36, 0x45, 0x02, 0x26, 0x2f,
+ 0x94, 0xb5, 0x94, 0x42, 0xd4, 0xa2, 0xc3, 0x6d, 0x3b, 0xd8, 0xd4, 0xa7, 0x82, 0x2b, 0xf2, 0x2f,
+ 0x24, 0xa0, 0xe0, 0xc7, 0x60, 0x8c, 0xb9, 0x86, 0xeb, 0xb1, 0xc2, 0xa8, 0x00, 0x5d, 0x79, 0x1f,
+ 0x50, 0xe1, 0xa8, 0xcf, 0x48, 0xd8, 0x31, 0xff, 0x1b, 0x49, 0x40, 0xf5, 0x4d, 0x06, 0xcc, 0x47,
+ 0xc6, 0xd7, 0x6d, 0xab, 0x42, 0x44, 0xad, 0x5c, 0x8d, 0xe5, 0xfd, 0x54, 0x57, 0xde, 0x17, 0xfb,
+ 0xb8, 0x44, 0x39, 0x87, 0xff, 0x09, 0xcf, 0x9b, 0x11, 0xee, 0x27, 0xe3, 0xc1, 0xdf, 0xb5, 0x4a,
+ 0xb3, 0xa1, 0x5b, 0xfc, 0x3c, 0xb0, 0x09, 0x60, 0xdd, 0x60, 0xee, 0x03, 0x6a, 0x58, 0xcc, 0x87,
+ 0x25, 0x0d, 0x2c, 0xaf, 0xfd, 0xcf, 0xe1, 0x5e, 0x8a, 0x7b, 0xe8, 0x4b, 0x32, 0x24, 0x5c, 0xeb,
+ 0x41, 0x43, 0x7d, 0x22, 0xc0, 0xbf, 0x83, 0x31, 0x8a, 0x0d, 0x66, 0x5b, 0x85, 0xac, 0x38, 0x72,
+ 0x98, 0x2f, 0x24, 0xa4, 0x48, 0x6a, 0xe1, 0x3f, 0xc0, 0x78, 0x03, 0x33, 0x66, 0x54, 0x71, 0x21,
+ 0x27, 0x0c, 0x67, 0xa5, 0xe1, 0xf8, 0xba, 0x2f, 0x46, 0x81, 0x5e, 0xfd, 0x5e, 0x01, 0x33, 0x51,
+ 0x9e, 0xd6, 0x08, 0x73, 0xe1, 0x93, 0x9e, 0xea, 0xd3, 0x86, 0xbb, 0x13, 0xf7, 0x16, 0xb5, 0x37,
+ 0x27, 0xc3, 0x4d, 0x04, 0x92, 0x8e, 0xca, 0xdb, 0x04, 0x39, 0xe2, 0xe2, 0x06, 0xcf, 0xfa, 0xe8,
+ 0xe9, 0xc9, 0x0b, 0x67, 0xde, 0xa3, 0x4a, 0xf4, 0x69, 0x89, 0x9b, 0xbb, 0xc3, 0x11, 0x90, 0x0f,
+ 0xa4, 0xfe, 0x3c, 0xda, 0x79, 0x05, 0x5e, 0x91, 0xf0, 0x6b, 0x05, 0x2c, 0x39, 0x89, 0x33, 0x46,
+ 0xde, 0xea, 0x46, 0x6a, 0xe8, 0xe4, 0x31, 0x85, 0xf0, 0x2e, 0xe6, 0xb3, 0x05, 0xeb, 0xaa, 0x3c,
+ 0xd3, 0xd2, 0x00, 0xe3, 0x01, 0x67, 0x81, 0x77, 0x01, 0x6c, 0x18, 0x2e, 0xcf, 0x69, 0x75, 0x93,
+ 0x62, 0x13, 0x57, 0x38, 0xaa, 0x1c, 0x4c, 0x61, 0x7d, 0xac, 0xf7, 0x58, 0xa0, 0x3e, 0x5e, 0xf0,
+ 0x0b, 0x05, 0xcc, 0x57, 0x7a, 0x07, 0x8d, 0xac, 0xcc, 0x2b, 0x43, 0xa5, 0xba, 0xcf, 0xa0, 0xd2,
+ 0x17, 0xdb, 0xad, 0xd2, 0x7c, 0x1f, 0x05, 0xea, 0x17, 0x0d, 0x7e, 0x02, 0x72, 0xd4, 0xab, 0x63,
+ 0x56, 0xc8, 0x8a, 0x17, 0x4e, 0x0f, 0xbb, 0x69, 0xd7, 0x89, 0x79, 0x80, 0xb8, 0xcf, 0xff, 0x89,
+ 0x5b, 0xdb, 0xf6, 0xc4, 0xc4, 0x62, 0xd1, 0x73, 0x0b, 0x15, 0xf2, 0x51, 0xd5, 0x17, 0x60, 0xae,
+ 0x7b, 0x70, 0xc0, 0x1a, 0x00, 0x66, 0xd0, 0xab, 0x7c, 0x4d, 0xf0, 0xb8, 0x17, 0xdf, 0xa3, 0xb2,
+ 0xc2, 0x46, 0x8f, 0xc6, 0x66, 0x28, 0x62, 0xa8, 0x03, 0x5b, 0x3d, 0x0f, 0xa6, 0x6e, 0x51, 0xdb,
+ 0x73, 0xe4, 0x21, 0xe1, 0x32, 0xc8, 0x5a, 0x46, 0x23, 0x18, 0x41, 0xe1, 0x5c, 0xdc, 0x30, 0x1a,
+ 0x18, 0x09, 0x8d, 0xfa, 0x95, 0x02, 0xa6, 0xd7, 0x48, 0x83, 0xb8, 0x08, 0x33, 0xc7, 0xb6, 0x18,
+ 0x86, 0x97, 0x62, 0x63, 0xeb, 0x64, 0xd7, 0xd8, 0x3a, 0x16, 0x33, 0xee, 0x18, 0x58, 0x4f, 0xc0,
+ 0xf8, 0xa7, 0x1e, 0xf6, 0x88, 0x55, 0x95, 0x63, 0xfb, 0x52, 0xea, 0x0d, 0xb7, 0x7c, 0xfb, 0x58,
+ 0xc5, 0xe9, 0x93, 0x7c, 0x10, 0x48, 0x0d, 0x0a, 0x20, 0xd5, 0xdf, 0x32, 0xe0, 0xa4, 0x88, 0x8c,
+ 0x2b, 0x03, 0xb6, 0xf3, 0x13, 0x50, 0x30, 0x18, 0xf3, 0x28, 0xae, 0x24, 0x6d, 0xe7, 0x65, 0x79,
+ 0x9d, 0xc2, 0x6a, 0x82, 0x1d, 0x4a, 0x44, 0x80, 0x7b, 0x60, 0xba, 0xde, 0x79, 0x79, 0x79, 0x4f,
+ 0x2d, 0xf5, 0x9e, 0xb1, 0x94, 0xe9, 0x0b, 0xf2, 0x08, 0xf1, 0xb4, 0xa3, 0x38, 0x76, 0x3f, 0x3a,
+ 0x30, 0x3a, 0x3c, 0x1d, 0x80, 0xf7, 0xc1, 0xc2, 0x8e, 0x4d, 0xa9, 0xbd, 0x4f, 0xac, 0xaa, 0x88,
+ 0x13, 0x80, 0x64, 0x05, 0xc8, 0x5f, 0xda, 0xad, 0xd2, 0x82, 0xde, 0xcf, 0x00, 0xf5, 0xf7, 0x53,
+ 0xf7, 0xc1, 0xc2, 0x06, 0x1f, 0x2c, 0xcc, 0xf6, 0xa8, 0x89, 0xa3, 0x9e, 0x80, 0x25, 0x90, 0x6b,
+ 0x62, 0xba, 0xe3, 0xd7, 0x75, 0x5e, 0xcf, 0xf3, 0x8e, 0x78, 0xc4, 0x05, 0xc8, 0x97, 0xf3, 0x9b,
+ 0x58, 0x91, 0xe7, 0x43, 0xb4, 0xc6, 0x0a, 0x63, 0xc2, 0x54, 0xdc, 0x64, 0x23, 0xae, 0x42, 0xdd,
+ 0xb6, 0xea, 0x61, 0x06, 0x2c, 0x26, 0xb4, 0x20, 0x7c, 0x04, 0x26, 0x98, 0xfc, 0x5b, 0xb6, 0xd5,
+ 0xe9, 0xd4, 0xc7, 0x90, 0xce, 0xd1, 0x16, 0x08, 0xd0, 0x50, 0x88, 0x05, 0x1d, 0x30, 0x4d, 0xe5,
+ 0x19, 0x44, 0x50, 0xb9, 0x0d, 0xfe, 0x95, 0x0a, 0xde, 0x9b, 0x9f, 0xe8, 0xb9, 0x51, 0x27, 0x22,
+ 0x8a, 0x07, 0x80, 0x2f, 0xc0, 0x5c, 0xc7, 0xc5, 0xfd, 0xa0, 0xa3, 0x22, 0xe8, 0xe5, 0xd4, 0xa0,
+ 0x7d, 0xdf, 0x45, 0x2f, 0xc8, 0xb8, 0x73, 0x1b, 0x5d, 0xb8, 0xa8, 0x27, 0x92, 0xfa, 0x63, 0x06,
+ 0x0c, 0x58, 0x10, 0x1f, 0x80, 0xf0, 0x19, 0x31, 0xc2, 0x77, 0xed, 0x08, 0xab, 0x2f, 0x91, 0x00,
+ 0x92, 0x2e, 0x02, 0xb8, 0x7a, 0x94, 0x20, 0x83, 0x09, 0xe1, 0x2f, 0x19, 0xf0, 0xb7, 0x64, 0xe7,
+ 0x88, 0x20, 0xde, 0x8b, 0x4d, 0xda, 0x7f, 0x77, 0x4d, 0xda, 0x53, 0x43, 0x40, 0xfc, 0x49, 0x18,
+ 0xbb, 0x08, 0xe3, 0x5b, 0x05, 0x14, 0x93, 0xf3, 0xf6, 0x01, 0x08, 0xe4, 0xd3, 0x38, 0x81, 0xbc,
+ 0x7a, 0x84, 0x2a, 0x4b, 0x20, 0x94, 0xb7, 0x06, 0x15, 0x57, 0xc8, 0xfc, 0x86, 0x58, 0xfd, 0xdf,
+ 0x64, 0x06, 0xe5, 0x4a, 0x30, 0xd5, 0x94, 0x9f, 0x30, 0x31, 0xef, 0x9b, 0x16, 0x5f, 0x40, 0x0d,
+ 0xbe, 0x43, 0xfc, 0x8a, 0x24, 0x60, 0xbc, 0xee, 0xaf, 0x6c, 0xd9, 0xd7, 0xfa, 0x70, 0x9b, 0x72,
+ 0xd0, 0x8a, 0xf7, 0xe9, 0x81, 0x34, 0x43, 0x01, 0x3e, 0xc4, 0x60, 0x0c, 0x8b, 0x9f, 0xee, 0x43,
+ 0x37, 0x77, 0xda, 0x2f, 0x7d, 0x1d, 0xf0, 0x42, 0xf4, 0xad, 0x90, 0x04, 0x57, 0x5f, 0x2a, 0x60,
+ 0x39, 0x6d, 0x2a, 0xc0, 0xe7, 0x7d, 0xd8, 0xde, 0x51, 0xc8, 0xfc, 0xf0, 0xec, 0xef, 0x5b, 0x05,
+ 0x1c, 0xef, 0xc7, 0xa9, 0x78, 0xa3, 0x71, 0x22, 0x15, 0xb2, 0xa0, 0xb0, 0xd1, 0xb6, 0x84, 0x14,
+ 0x49, 0x2d, 0x3c, 0x0b, 0x26, 0x6a, 0x86, 0x55, 0xd9, 0x26, 0x9f, 0x05, 0x1c, 0x3f, 0x2c, 0xf5,
+ 0xdb, 0x52, 0x8e, 0x42, 0x0b, 0x78, 0x03, 0xcc, 0x09, 0xbf, 0x35, 0x6c, 0x55, 0xdd, 0x9a, 0x78,
+ 0x13, 0xc9, 0x51, 0xc2, 0xdd, 0xb3, 0xd5, 0xa5, 0x47, 0x3d, 0x1e, 0xea, 0xaf, 0x0a, 0x80, 0x7f,
+ 0x84, 0x56, 0x9c, 0x01, 0x79, 0xc3, 0x21, 0x82, 0xed, 0xfa, 0xcd, 0x96, 0xd7, 0xa7, 0xdb, 0xad,
+ 0x52, 0x7e, 0x75, 0xf3, 0x8e, 0x2f, 0x44, 0x91, 0x9e, 0x1b, 0x07, 0xfb, 0xd6, 0xdf, 0xab, 0xd2,
+ 0x38, 0x08, 0xcc, 0x50, 0xa4, 0x87, 0x57, 0xc0, 0x94, 0x59, 0xf7, 0x98, 0x8b, 0xe9, 0xb6, 0x69,
+ 0x3b, 0x58, 0x0c, 0xa7, 0x09, 0xfd, 0xb8, 0xbc, 0xd3, 0xd4, 0xf5, 0x0e, 0x1d, 0x8a, 0x59, 0x42,
+ 0x0d, 0x00, 0xde, 0x59, 0xcc, 0x31, 0x78, 0x9c, 0x9c, 0x88, 0x33, 0xc3, 0x1f, 0x6c, 0x23, 0x94,
+ 0xa2, 0x0e, 0x0b, 0xf5, 0x19, 0x58, 0xd8, 0xc6, 0xb4, 0x49, 0x4c, 0xbc, 0x6a, 0x9a, 0xb6, 0x67,
+ 0xb9, 0x01, 0x6f, 0x2f, 0x83, 0x7c, 0x68, 0x26, 0x9b, 0xef, 0x98, 0x8c, 0x9f, 0x0f, 0xb1, 0x50,
+ 0x64, 0x13, 0x76, 0x7b, 0x26, 0xb1, 0xdb, 0x7f, 0xc8, 0x80, 0xf1, 0x08, 0x3e, 0xbb, 0x47, 0xac,
+ 0x8a, 0x44, 0x3e, 0x11, 0x58, 0xdf, 0x23, 0x56, 0xe5, 0x5d, 0xab, 0x34, 0x29, 0xcd, 0xf8, 0x27,
+ 0x12, 0x86, 0xf0, 0x2e, 0xc8, 0x7a, 0x0c, 0x53, 0xd9, 0xc7, 0x67, 0x53, 0xab, 0xf9, 0x21, 0xc3,
+ 0x34, 0x20, 0x5a, 0x13, 0x1c, 0x9a, 0x0b, 0x90, 0xc0, 0x80, 0x1b, 0x20, 0x57, 0xe5, 0xaf, 0x22,
+ 0x5b, 0xf5, 0x5c, 0x2a, 0x58, 0xe7, 0x2f, 0x1a, 0xbf, 0x10, 0x84, 0x04, 0xf9, 0x30, 0x90, 0x82,
+ 0x19, 0x16, 0x4b, 0xa2, 0x78, 0xb0, 0x61, 0x88, 0x53, 0xdf, 0xdc, 0xeb, 0xb0, 0xdd, 0x2a, 0xcd,
+ 0xc4, 0x55, 0xa8, 0x2b, 0x82, 0x5a, 0x06, 0x93, 0x1d, 0x57, 0x4c, 0x9f, 0xb5, 0xfa, 0xcd, 0x57,
+ 0x87, 0xc5, 0x91, 0xd7, 0x87, 0xc5, 0x91, 0x37, 0x87, 0xc5, 0x91, 0xcf, 0xdb, 0x45, 0xe5, 0x55,
+ 0xbb, 0xa8, 0xbc, 0x6e, 0x17, 0x95, 0x37, 0xed, 0xa2, 0xf2, 0xb6, 0x5d, 0x54, 0xbe, 0xfc, 0xa9,
+ 0x38, 0xf2, 0xb8, 0x94, 0xf2, 0x2f, 0xda, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x6c, 0x4e,
+ 0x4e, 0xdd, 0x15, 0x00, 0x00,
+}
+
+func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.LendablePercent != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.NominalConcurrencyShares != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
}
func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) {
@@ -1491,6 +1557,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if m.Exempt != nil {
+ {
+ size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
if m.Limited != nil {
{
size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i])
@@ -1783,6 +1861,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
+func (m *ExemptPriorityLevelConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NominalConcurrencyShares != nil {
+ n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares))
+ }
+ if m.LendablePercent != nil {
+ n += 1 + sovGenerated(uint64(*m.LendablePercent))
+ }
+ return n
+}
+
func (m *FlowDistinguisherMethod) Size() (n int) {
if m == nil {
return 0
@@ -2048,6 +2141,10 @@ func (m *PriorityLevelConfigurationSpec) Size() (n int) {
l = m.Limited.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.Exempt != nil {
+ l = m.Exempt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -2165,6 +2262,17 @@ func sovGenerated(x uint64) (n int) {
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (this *ExemptPriorityLevelConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`,
+ `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`,
+ `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *FlowDistinguisherMethod) String() string {
if this == nil {
return "nil"
@@ -2381,6 +2489,7 @@ func (this *PriorityLevelConfigurationSpec) String() string {
s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`,
+ `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`,
`}`,
}, "")
return s
@@ -2468,6 +2577,96 @@ func valueToStringGenerated(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
+func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NominalConcurrencyShares = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LendablePercent = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -4547,6 +4746,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Exempt == nil {
+ m.Exempt = &ExemptPriorityLevelConfiguration{}
+ }
+ if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
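Note on the generated marshaling code above: the single-byte keys it writes (0x8, 0x10, 0x1a) encode the protobuf field number and wire type as (fieldNum << 3) | wireType. The following standalone Go sketch is illustrative only (not part of the vendored code) and reproduces those literals for the ExemptPriorityLevelConfiguration fields:

package main

import "fmt"

// key computes the protobuf key byte (fieldNum << 3) | wireType, which is
// exactly what the generated MarshalToSizedBuffer code writes before each field.
func key(fieldNum, wireType int) byte {
    return byte(fieldNum<<3 | wireType)
}

func main() {
    fmt.Printf("nominalConcurrencyShares (field 1, varint): %#x\n", key(1, 0)) // 0x8
    fmt.Printf("lendablePercent (field 2, varint):          %#x\n", key(2, 0)) // 0x10
    fmt.Printf("exempt (field 3, length-delimited):         %#x\n", key(3, 2)) // 0x1a
}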
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
index 69ca79ad2f..6509386f26 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
@@ -28,6 +28,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/flowcontrol/v1alpha1";
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
+message ExemptPriorityLevelConfiguration {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
+ // +optional
+ optional int32 nominalConcurrencyShares = 1;
+
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. This value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ //
+ // +optional
+ optional int32 lendablePercent = 2;
+}
+
// FlowDistinguisherMethod specifies the method of a flow distinguisher.
message FlowDistinguisherMethod {
// `type` is the type of flow distinguisher method
@@ -332,6 +366,14 @@ message PriorityLevelConfigurationSpec {
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
optional LimitedPriorityLevelConfiguration limited = 2;
+
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ // +optional
+ optional ExemptPriorityLevelConfiguration exempt = 3;
}
// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
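The nominalConcurrencyShares and lendablePercent comments above define NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) and LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ). A minimal Go sketch of that arithmetic, using hypothetical share and percent values chosen only to illustrate the formulas:

package main

import (
    "fmt"
    "math"
)

// nominalCL implements NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ).
func nominalCL(serverCL int, ncs, sumNCS int32) int {
    return int(math.Ceil(float64(serverCL) * float64(ncs) / float64(sumNCS)))
}

// lendableCL implements LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ).
func lendableCL(nominalCL int, lendablePercent int32) int {
    return int(math.Round(float64(nominalCL) * float64(lendablePercent) / 100.0))
}

func main() {
    // Hypothetical values: a server concurrency limit of 600 shared by levels
    // whose NCS values sum to 145, with this level holding 5 shares.
    n := nominalCL(600, 5, 145)
    fmt.Println(n, lendableCL(n, 50)) // 21 11: ceil(600*5/145)=21, round(21*0.5)=11
}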
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
index ebf665bcc3..161411ff33 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
@@ -399,6 +399,14 @@ type PriorityLevelConfigurationSpec struct {
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"`
+
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ // +optional
+ Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"`
}
// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level
@@ -469,6 +477,43 @@ type LimitedPriorityLevelConfiguration struct {
BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"`
}
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
+type ExemptPriorityLevelConfiguration struct {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
+ // +optional
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ //
+ // +optional
+ LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"`
+ // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`.
+ // In other words, an exempt priority level
+ // has no meaningful limit on how much it borrows.
+ // There is no explicit representation of that here.
+}
+
// LimitResponse defines how to handle requests that can not be executed right now.
// +union
type LimitResponse struct {
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
index c95999fa5e..1d0680c108 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
@@ -27,6 +27,16 @@ package v1alpha1
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ExemptPriorityLevelConfiguration = map[string]string{
+ "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.",
+ "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.",
+ "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )",
+}
+
+func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string {
+ return map_ExemptPriorityLevelConfiguration
+}
+
var map_FlowDistinguisherMethod = map[string]string{
"": "FlowDistinguisherMethod specifies the method of a flow distinguisher.",
"type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.",
@@ -190,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{
"": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.",
"type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.",
"limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.",
+ "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.",
}
func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go
index e0272804f4..a5c9737aa5 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go
@@ -25,6 +25,32 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) {
+ *out = *in
+ if in.NominalConcurrencyShares != nil {
+ in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares
+ *out = new(int32)
+ **out = **in
+ }
+ if in.LendablePercent != nil {
+ in, out := &in.LendablePercent, &out.LendablePercent
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration.
+func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ExemptPriorityLevelConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) {
*out = *in
@@ -400,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu
*out = new(LimitedPriorityLevelConfiguration)
(*in).DeepCopyInto(*out)
}
+ if in.Exempt != nil {
+ in, out := &in.Exempt, &out.Exempt
+ *out = new(ExemptPriorityLevelConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
return
}
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go
index fbaea85dd6..33f4b97e39 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go
@@ -43,10 +43,38 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
+func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
+func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_80171c2a4e3669de, []int{0}
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src)
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
+
func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
func (*FlowDistinguisherMethod) ProtoMessage() {}
func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{0}
+ return fileDescriptor_80171c2a4e3669de, []int{1}
}
func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
func (m *FlowSchema) Reset() { *m = FlowSchema{} }
func (*FlowSchema) ProtoMessage() {}
func (*FlowSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{1}
+ return fileDescriptor_80171c2a4e3669de, []int{2}
}
func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
func (*FlowSchemaCondition) ProtoMessage() {}
func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{2}
+ return fileDescriptor_80171c2a4e3669de, []int{3}
}
func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
func (*FlowSchemaList) ProtoMessage() {}
func (*FlowSchemaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{3}
+ return fileDescriptor_80171c2a4e3669de, []int{4}
}
func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
func (*FlowSchemaSpec) ProtoMessage() {}
func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{4}
+ return fileDescriptor_80171c2a4e3669de, []int{5}
}
func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
func (*FlowSchemaStatus) ProtoMessage() {}
func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{5}
+ return fileDescriptor_80171c2a4e3669de, []int{6}
}
func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
func (m *GroupSubject) Reset() { *m = GroupSubject{} }
func (*GroupSubject) ProtoMessage() {}
func (*GroupSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{6}
+ return fileDescriptor_80171c2a4e3669de, []int{7}
}
func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
func (m *LimitResponse) Reset() { *m = LimitResponse{} }
func (*LimitResponse) ProtoMessage() {}
func (*LimitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{7}
+ return fileDescriptor_80171c2a4e3669de, []int{8}
}
func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{8}
+ return fileDescriptor_80171c2a4e3669de, []int{9}
}
func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
func (*NonResourcePolicyRule) ProtoMessage() {}
func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{9}
+ return fileDescriptor_80171c2a4e3669de, []int{10}
}
func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
func (*PolicyRulesWithSubjects) ProtoMessage() {}
func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{10}
+ return fileDescriptor_80171c2a4e3669de, []int{11}
}
func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
func (*PriorityLevelConfiguration) ProtoMessage() {}
func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{11}
+ return fileDescriptor_80171c2a4e3669de, []int{12}
}
func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{12}
+ return fileDescriptor_80171c2a4e3669de, []int{13}
}
func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf
func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
func (*PriorityLevelConfigurationList) ProtoMessage() {}
func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{13}
+ return fileDescriptor_80171c2a4e3669de, []int{14}
}
func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
func (*PriorityLevelConfigurationReference) ProtoMessage() {}
func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{14}
+ return fileDescriptor_80171c2a4e3669de, []int{15}
}
func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf
func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{15}
+ return fileDescriptor_80171c2a4e3669de, []int{16}
}
func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{16}
+ return fileDescriptor_80171c2a4e3669de, []int{17}
}
func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
func (*QueuingConfiguration) ProtoMessage() {}
func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{17}
+ return fileDescriptor_80171c2a4e3669de, []int{18}
}
func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
func (*ResourcePolicyRule) ProtoMessage() {}
func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{18}
+ return fileDescriptor_80171c2a4e3669de, []int{19}
}
func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
func (*ServiceAccountSubject) ProtoMessage() {}
func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{19}
+ return fileDescriptor_80171c2a4e3669de, []int{20}
}
func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
func (m *Subject) Reset() { *m = Subject{} }
func (*Subject) ProtoMessage() {}
func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{20}
+ return fileDescriptor_80171c2a4e3669de, []int{21}
}
func (m *Subject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo
func (m *UserSubject) Reset() { *m = UserSubject{} }
func (*UserSubject) ProtoMessage() {}
func (*UserSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_80171c2a4e3669de, []int{21}
+ return fileDescriptor_80171c2a4e3669de, []int{22}
}
func (m *UserSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() {
var xxx_messageInfo_UserSubject proto.InternalMessageInfo
func init() {
+ proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration")
proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowDistinguisherMethod")
proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchema")
proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaCondition")
@@ -689,105 +718,141 @@ func init() {
}
var fileDescriptor_80171c2a4e3669de = []byte{
- // 1553 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xdb, 0xc6,
- 0x12, 0x37, 0x65, 0xc9, 0xb6, 0xc6, 0x7f, 0xb3, 0x8e, 0x61, 0x3d, 0x07, 0x90, 0x1c, 0x3e, 0xe0,
- 0xe5, 0xbd, 0x97, 0x84, 0x4a, 0xd2, 0xa4, 0x49, 0x5b, 0xf4, 0x8f, 0xe9, 0xb4, 0x69, 0x1a, 0xdb,
- 0x71, 0xd6, 0x49, 0x5b, 0xa4, 0x01, 0x1a, 0x8a, 0x5a, 0x4b, 0x8c, 0x25, 0x92, 0xd9, 0x25, 0x65,
- 0xb8, 0xb9, 0x14, 0xfd, 0x04, 0x3d, 0xb7, 0xc7, 0x1e, 0x7a, 0xef, 0x17, 0xe8, 0xb1, 0x41, 0x4f,
- 0x39, 0xe6, 0xa4, 0x36, 0xea, 0xa9, 0xdf, 0xa0, 0x0d, 0x50, 0xa0, 0xd8, 0xe5, 0x92, 0x14, 0xf5,
- 0x8f, 0x42, 0x02, 0xe4, 0xd4, 0x9b, 0x39, 0xf3, 0x9b, 0xdf, 0xec, 0xcc, 0xce, 0xcc, 0x8e, 0x0c,
- 0xd7, 0x0e, 0xae, 0x30, 0xcd, 0x72, 0xca, 0x07, 0x7e, 0x85, 0x50, 0x9b, 0x78, 0x84, 0x95, 0x5b,
- 0xc4, 0xae, 0x3a, 0xb4, 0x2c, 0x15, 0x86, 0x6b, 0x95, 0xf7, 0x1b, 0xce, 0xa1, 0xe9, 0xd8, 0x1e,
- 0x75, 0x1a, 0xe5, 0xd6, 0xf9, 0x0a, 0xf1, 0x8c, 0xf3, 0xe5, 0x1a, 0xb1, 0x09, 0x35, 0x3c, 0x52,
- 0xd5, 0x5c, 0xea, 0x78, 0x0e, 0x2a, 0x06, 0x78, 0xcd, 0x70, 0x2d, 0xad, 0x0b, 0xaf, 0x49, 0xfc,
- 0xda, 0xd9, 0x9a, 0xe5, 0xd5, 0xfd, 0x8a, 0x66, 0x3a, 0xcd, 0x72, 0xcd, 0xa9, 0x39, 0x65, 0x61,
- 0x56, 0xf1, 0xf7, 0xc5, 0x97, 0xf8, 0x10, 0x7f, 0x05, 0x74, 0x6b, 0x17, 0x63, 0xf7, 0x4d, 0xc3,
- 0xac, 0x5b, 0x36, 0xa1, 0x47, 0x65, 0xf7, 0xa0, 0xc6, 0x05, 0xac, 0xdc, 0x24, 0x9e, 0x51, 0x6e,
- 0xf5, 0x1d, 0x62, 0xad, 0x3c, 0xcc, 0x8a, 0xfa, 0xb6, 0x67, 0x35, 0x49, 0x9f, 0xc1, 0xeb, 0x69,
- 0x06, 0xcc, 0xac, 0x93, 0xa6, 0xd1, 0x6b, 0xa7, 0xde, 0x85, 0xd5, 0x0f, 0x1a, 0xce, 0xe1, 0x55,
- 0x8b, 0x79, 0x96, 0x5d, 0xf3, 0x2d, 0x56, 0x27, 0x74, 0x9b, 0x78, 0x75, 0xa7, 0x8a, 0xde, 0x85,
- 0xac, 0x77, 0xe4, 0x92, 0x82, 0xb2, 0xae, 0xfc, 0x37, 0xaf, 0x9f, 0x7e, 0xdc, 0x2e, 0x4d, 0x74,
- 0xda, 0xa5, 0xec, 0xed, 0x23, 0x97, 0x3c, 0x6f, 0x97, 0x4e, 0x0c, 0x31, 0xe3, 0x6a, 0x2c, 0x0c,
- 0xd5, 0x6f, 0x32, 0x00, 0x1c, 0xb5, 0x27, 0x5c, 0xa3, 0xfb, 0x30, 0xc3, 0xc3, 0xad, 0x1a, 0x9e,
- 0x21, 0x38, 0x67, 0x2f, 0x9c, 0xd3, 0xe2, 0x5c, 0x47, 0xa7, 0xd6, 0xdc, 0x83, 0x1a, 0x17, 0x30,
- 0x8d, 0xa3, 0xb5, 0xd6, 0x79, 0xed, 0x66, 0xe5, 0x01, 0x31, 0xbd, 0x6d, 0xe2, 0x19, 0x3a, 0x92,
- 0xa7, 0x80, 0x58, 0x86, 0x23, 0x56, 0xb4, 0x0b, 0x59, 0xe6, 0x12, 0xb3, 0x90, 0x11, 0xec, 0x9a,
- 0x36, 0xfa, 0x26, 0xb5, 0xf8, 0x6c, 0x7b, 0x2e, 0x31, 0xf5, 0xb9, 0x30, 0x42, 0xfe, 0x85, 0x05,
- 0x13, 0xfa, 0x14, 0xa6, 0x98, 0x67, 0x78, 0x3e, 0x2b, 0x4c, 0xf6, 0x9d, 0x38, 0x8d, 0x53, 0xd8,
- 0xe9, 0x0b, 0x92, 0x75, 0x2a, 0xf8, 0xc6, 0x92, 0x4f, 0x7d, 0x9a, 0x81, 0xe5, 0x18, 0xbc, 0xe9,
- 0xd8, 0x55, 0xcb, 0xb3, 0x1c, 0x1b, 0xbd, 0x95, 0xc8, 0xfa, 0xa9, 0x9e, 0xac, 0xaf, 0x0e, 0x30,
- 0x89, 0x33, 0x8e, 0xde, 0x88, 0x8e, 0x9b, 0x11, 0xe6, 0x27, 0x93, 0xce, 0x9f, 0xb7, 0x4b, 0x8b,
- 0x91, 0x59, 0xf2, 0x3c, 0xa8, 0x05, 0xa8, 0x61, 0x30, 0xef, 0x36, 0x35, 0x6c, 0x16, 0xd0, 0x5a,
- 0x4d, 0x22, 0xa3, 0xfe, 0xff, 0x78, 0xf7, 0xc4, 0x2d, 0xf4, 0x35, 0xe9, 0x12, 0x6d, 0xf5, 0xb1,
- 0xe1, 0x01, 0x1e, 0xd0, 0x7f, 0x60, 0x8a, 0x12, 0x83, 0x39, 0x76, 0x21, 0x2b, 0x8e, 0x1c, 0xe5,
- 0x0b, 0x0b, 0x29, 0x96, 0x5a, 0xf4, 0x3f, 0x98, 0x6e, 0x12, 0xc6, 0x8c, 0x1a, 0x29, 0xe4, 0x04,
- 0x70, 0x51, 0x02, 0xa7, 0xb7, 0x03, 0x31, 0x0e, 0xf5, 0xea, 0x8f, 0x0a, 0x2c, 0xc4, 0x79, 0xda,
- 0xb2, 0x98, 0x87, 0xee, 0xf5, 0xd5, 0x9e, 0x36, 0x5e, 0x4c, 0xdc, 0x5a, 0x54, 0xde, 0x92, 0x74,
- 0x37, 0x13, 0x4a, 0xba, 0xea, 0xee, 0x26, 0xe4, 0x2c, 0x8f, 0x34, 0x79, 0xd6, 0x27, 0x7b, 0xd2,
- 0x95, 0x52, 0x24, 0xfa, 0xbc, 0xa4, 0xcd, 0x5d, 0xe7, 0x04, 0x38, 0xe0, 0x51, 0x7f, 0x9f, 0xec,
- 0x8e, 0x80, 0xd7, 0x23, 0xfa, 0x5e, 0x81, 0x35, 0x97, 0x5a, 0x0e, 0xb5, 0xbc, 0xa3, 0x2d, 0xd2,
- 0x22, 0x8d, 0x4d, 0xc7, 0xde, 0xb7, 0x6a, 0x3e, 0x35, 0x78, 0x2a, 0x65, 0x50, 0x9b, 0x69, 0x9e,
- 0x77, 0x87, 0x32, 0x60, 0xb2, 0x4f, 0x28, 0xb1, 0x4d, 0xa2, 0xab, 0xf2, 0x48, 0x6b, 0x23, 0xc0,
- 0x23, 0x8e, 0x82, 0x3e, 0x02, 0xd4, 0x34, 0x3c, 0x9e, 0xd1, 0xda, 0x2e, 0x25, 0x26, 0xa9, 0x72,
- 0x56, 0x51, 0x90, 0xb9, 0xb8, 0x3a, 0xb6, 0xfb, 0x10, 0x78, 0x80, 0x15, 0xfa, 0x4a, 0x81, 0xe5,
- 0x6a, 0xff, 0x90, 0x91, 0x75, 0x79, 0x79, 0x9c, 0x44, 0x0f, 0x98, 0x51, 0xfa, 0x6a, 0xa7, 0x5d,
- 0x5a, 0x1e, 0xa0, 0xc0, 0x83, 0x9c, 0xa1, 0x7b, 0x90, 0xa3, 0x7e, 0x83, 0xb0, 0x42, 0x56, 0x5c,
- 0x6f, 0xaa, 0xd7, 0x5d, 0xa7, 0x61, 0x99, 0x47, 0x98, 0x9b, 0x7c, 0x62, 0x79, 0xf5, 0x3d, 0x5f,
- 0xcc, 0x2a, 0x16, 0xdf, 0xb5, 0x50, 0xe1, 0x80, 0x54, 0x7d, 0x04, 0x4b, 0xbd, 0x43, 0x03, 0xd5,
- 0x00, 0xcc, 0xb0, 0x4f, 0x59, 0x41, 0x11, 0x6e, 0x5f, 0x1b, 0xbf, 0xaa, 0xa2, 0x1e, 0x8f, 0xe7,
- 0x65, 0x24, 0x62, 0xb8, 0x8b, 0x5a, 0x3d, 0x07, 0x73, 0xd7, 0xa8, 0xe3, 0xbb, 0xf2, 0x8c, 0x68,
- 0x1d, 0xb2, 0xb6, 0xd1, 0x0c, 0xa7, 0x4f, 0x34, 0x11, 0x77, 0x8c, 0x26, 0xc1, 0x42, 0xa3, 0x7e,
- 0xa7, 0xc0, 0xfc, 0x96, 0xd5, 0xb4, 0x3c, 0x4c, 0x98, 0xeb, 0xd8, 0x8c, 0xa0, 0x4b, 0x89, 0x89,
- 0x75, 0xb2, 0x67, 0x62, 0x1d, 0x4b, 0x80, 0xbb, 0x66, 0xd5, 0x67, 0x30, 0xfd, 0xd0, 0x27, 0xbe,
- 0x65, 0xd7, 0xe4, 0xbc, 0xbe, 0x98, 0x16, 0xe0, 0xad, 0x00, 0x9e, 0xa8, 0x36, 0x7d, 0x96, 0x8f,
- 0x00, 0xa9, 0xc1, 0x21, 0xa3, 0xfa, 0x57, 0x06, 0x4e, 0x0a, 0xc7, 0xa4, 0x3a, 0xbc, 0x8a, 0xd1,
- 0x3d, 0x28, 0x18, 0x8c, 0xf9, 0x94, 0x54, 0x37, 0x1d, 0xdb, 0xf4, 0x29, 0xaf, 0xff, 0xa3, 0xbd,
- 0xba, 0x41, 0x09, 0x13, 0xd1, 0xe4, 0xf4, 0x75, 0x19, 0x4d, 0x61, 0x63, 0x08, 0x0e, 0x0f, 0x65,
- 0x40, 0x0f, 0x60, 0xbe, 0xd1, 0x1d, 0xbb, 0x0c, 0xf3, 0x6c, 0x5a, 0x98, 0x89, 0x84, 0xe9, 0x2b,
- 0xf2, 0x04, 0xc9, 0xa4, 0xe3, 0x24, 0x35, 0x7a, 0x1b, 0x16, 0x1b, 0xc4, 0xae, 0x1a, 0x95, 0x06,
- 0xd9, 0x25, 0xd4, 0x24, 0xb6, 0x27, 0x5a, 0x24, 0xa7, 0x2f, 0x77, 0xda, 0xa5, 0xc5, 0xad, 0xa4,
- 0x0a, 0xf7, 0x62, 0xd1, 0x4d, 0x58, 0xa9, 0x38, 0x94, 0x3a, 0x87, 0x96, 0x5d, 0x13, 0x7e, 0x42,
- 0x92, 0xac, 0x20, 0xf9, 0x57, 0xa7, 0x5d, 0x5a, 0xd1, 0x07, 0x01, 0xf0, 0x60, 0x3b, 0xf5, 0x10,
- 0x56, 0x76, 0xf8, 0x4c, 0x61, 0x8e, 0x4f, 0x4d, 0x12, 0x37, 0x04, 0x2a, 0x41, 0xae, 0x45, 0x68,
- 0x25, 0x28, 0xea, 0xbc, 0x9e, 0xe7, 0xed, 0xf0, 0x31, 0x17, 0xe0, 0x40, 0xce, 0x23, 0xb1, 0x63,
- 0xcb, 0x3b, 0x78, 0x8b, 0x15, 0xa6, 0x04, 0x54, 0x44, 0xb2, 0x93, 0x54, 0xe1, 0x5e, 0xac, 0xda,
- 0xce, 0xc0, 0xea, 0x90, 0xfe, 0x43, 0x77, 0x60, 0x86, 0xc9, 0xbf, 0x65, 0x4f, 0x9d, 0x4a, 0xbb,
- 0x0b, 0x69, 0x1b, 0x4f, 0xff, 0x90, 0x0c, 0x47, 0x54, 0xc8, 0x81, 0x79, 0x2a, 0x8f, 0x20, 0x7c,
- 0xca, 0x57, 0xe0, 0x42, 0x1a, 0x77, 0x7f, 0x76, 0xe2, 0xcb, 0xc6, 0xdd, 0x84, 0x38, 0xc9, 0x8f,
- 0x1e, 0xc1, 0x52, 0x57, 0xd8, 0x81, 0xcf, 0x49, 0xe1, 0xf3, 0x52, 0x9a, 0xcf, 0x81, 0x97, 0xa2,
- 0x17, 0xa4, 0xdb, 0xa5, 0x9d, 0x1e, 0x5a, 0xdc, 0xe7, 0x48, 0xfd, 0x39, 0x03, 0x23, 0x1e, 0x86,
- 0x57, 0xb0, 0xe4, 0xdd, 0x4f, 0x2c, 0x79, 0xef, 0xbc, 0xf8, 0x8b, 0x37, 0x74, 0xe9, 0xab, 0xf7,
- 0x2c, 0x7d, 0xef, 0xbd, 0x84, 0x8f, 0xd1, 0x4b, 0xe0, 0x1f, 0x19, 0xf8, 0xf7, 0x70, 0xe3, 0x78,
- 0x29, 0xbc, 0x91, 0x18, 0xb1, 0x97, 0x7b, 0x46, 0xec, 0xa9, 0x31, 0x28, 0xfe, 0x59, 0x12, 0x7b,
- 0x96, 0xc4, 0x5f, 0x14, 0x28, 0x0e, 0xcf, 0xdb, 0x2b, 0x58, 0x1a, 0x3f, 0x4f, 0x2e, 0x8d, 0x6f,
- 0xbe, 0x78, 0x91, 0x0d, 0x59, 0x22, 0xaf, 0x8d, 0xaa, 0xad, 0x68, 0xdd, 0x1b, 0xe3, 0xc9, 0xff,
- 0x69, 0x64, 0xaa, 0xc4, 0x76, 0x9a, 0xf2, 0xab, 0x25, 0x61, 0xfd, 0xbe, 0xcd, 0x9f, 0x9e, 0x26,
- 0x7f, 0x3d, 0x82, 0x82, 0xac, 0xc3, 0x74, 0x23, 0x78, 0xab, 0x65, 0x53, 0x6f, 0x8c, 0xf5, 0x44,
- 0x8e, 0x7a, 0xda, 0x83, 0xb5, 0x40, 0xc2, 0x70, 0x48, 0xaf, 0x7e, 0xab, 0xc0, 0x7a, 0x5a, 0xb3,
- 0xa2, 0xc3, 0x01, 0xcb, 0xd7, 0x4b, 0x2c, 0xd6, 0xe3, 0x2f, 0x63, 0x3f, 0x28, 0x70, 0x7c, 0xd0,
- 0x8e, 0xc3, 0xcb, 0x9f, 0x2f, 0x36, 0xd1, 0x56, 0x12, 0x95, 0xff, 0x2d, 0x21, 0xc5, 0x52, 0x8b,
- 0xce, 0xc0, 0x4c, 0xdd, 0xb0, 0xab, 0x7b, 0xd6, 0x17, 0xe1, 0xbe, 0x1d, 0x15, 0xe0, 0x87, 0x52,
- 0x8e, 0x23, 0x04, 0xba, 0x0a, 0x4b, 0xc2, 0x6e, 0x8b, 0xd8, 0x35, 0xaf, 0x2e, 0x72, 0x25, 0x97,
- 0x86, 0xe8, 0x3d, 0xb8, 0xd5, 0xa3, 0xc7, 0x7d, 0x16, 0xea, 0x9f, 0x0a, 0xa0, 0x17, 0x79, 0xe7,
- 0x4f, 0x43, 0xde, 0x70, 0x2d, 0xb1, 0x7c, 0x06, 0x2d, 0x90, 0xd7, 0xe7, 0x3b, 0xed, 0x52, 0x7e,
- 0x63, 0xf7, 0x7a, 0x20, 0xc4, 0xb1, 0x9e, 0x83, 0xc3, 0x27, 0x30, 0x78, 0xea, 0x24, 0x38, 0x74,
- 0xcc, 0x70, 0xac, 0x47, 0x57, 0x60, 0xce, 0x6c, 0xf8, 0xcc, 0x23, 0x74, 0xcf, 0x74, 0x5c, 0x22,
- 0x46, 0xc6, 0x8c, 0x7e, 0x5c, 0xc6, 0x34, 0xb7, 0xd9, 0xa5, 0xc3, 0x09, 0x24, 0xd2, 0x00, 0x78,
- 0xc1, 0x33, 0xd7, 0xe0, 0x7e, 0x72, 0xc2, 0xcf, 0x02, 0xbf, 0xb0, 0x9d, 0x48, 0x8a, 0xbb, 0x10,
- 0xea, 0x03, 0x58, 0xd9, 0x23, 0xb4, 0x65, 0x99, 0x64, 0xc3, 0x34, 0x1d, 0xdf, 0xf6, 0xc2, 0x35,
- 0xba, 0x0c, 0xf9, 0x08, 0x26, 0x7b, 0xe2, 0x98, 0xf4, 0x9f, 0x8f, 0xb8, 0x70, 0x8c, 0x89, 0x9a,
- 0x30, 0x33, 0xbc, 0x09, 0x33, 0x30, 0x1d, 0xd3, 0x67, 0x0f, 0x2c, 0xbb, 0x2a, 0x99, 0x4f, 0x84,
- 0xe8, 0x1b, 0x96, 0x5d, 0x7d, 0xde, 0x2e, 0xcd, 0x4a, 0x18, 0xff, 0xc4, 0x02, 0x88, 0xae, 0x43,
- 0xd6, 0x67, 0x84, 0xca, 0xf6, 0x3a, 0x9d, 0x56, 0xcc, 0x77, 0x18, 0xa1, 0xe1, 0xe6, 0x33, 0xc3,
- 0x99, 0xb9, 0x00, 0x0b, 0x0a, 0xb4, 0x0d, 0xb9, 0x1a, 0xbf, 0x14, 0x39, 0xf5, 0xcf, 0xa4, 0x71,
- 0x75, 0xff, 0xbc, 0x08, 0xca, 0x40, 0x48, 0x70, 0xc0, 0x82, 0x1e, 0xc2, 0x02, 0x4b, 0xa4, 0x50,
- 0x5c, 0xd7, 0x18, 0x9b, 0xcc, 0xc0, 0xc4, 0xeb, 0xa8, 0xd3, 0x2e, 0x2d, 0x24, 0x55, 0xb8, 0xc7,
- 0x81, 0x5a, 0x86, 0xd9, 0xae, 0x00, 0xd3, 0xe7, 0x9f, 0x7e, 0xf5, 0xf1, 0xb3, 0xe2, 0xc4, 0x93,
- 0x67, 0xc5, 0x89, 0xa7, 0xcf, 0x8a, 0x13, 0x5f, 0x76, 0x8a, 0xca, 0xe3, 0x4e, 0x51, 0x79, 0xd2,
- 0x29, 0x2a, 0x4f, 0x3b, 0x45, 0xe5, 0xd7, 0x4e, 0x51, 0xf9, 0xfa, 0xb7, 0xe2, 0xc4, 0xdd, 0xe2,
- 0xe8, 0xff, 0x33, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x6d, 0x6e, 0x75, 0xa1, 0x14, 0x00,
- 0x00,
+ // 1614 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x73, 0xdb, 0xc4,
+ 0x17, 0x8f, 0x1c, 0x3b, 0x89, 0x5f, 0x7e, 0x76, 0xd3, 0x4c, 0xfc, 0x4d, 0xbf, 0x63, 0xa7, 0x62,
+ 0x86, 0x02, 0x6d, 0xe5, 0xb6, 0xb4, 0xb4, 0xc0, 0xf0, 0x23, 0x4a, 0x4b, 0x29, 0x4d, 0xd2, 0x74,
+ 0xd3, 0x42, 0xa7, 0x74, 0x86, 0xca, 0xf2, 0xc6, 0x56, 0x63, 0x4b, 0xea, 0xae, 0xe4, 0x10, 0x7a,
+ 0x61, 0xf8, 0x0b, 0x38, 0xc3, 0x91, 0x03, 0x27, 0x2e, 0x5c, 0x39, 0x70, 0xa4, 0xc3, 0xa9, 0xc7,
+ 0x9e, 0x0c, 0x35, 0x27, 0xfe, 0x03, 0xe8, 0x0c, 0x33, 0xcc, 0xae, 0xd6, 0x92, 0xe5, 0x5f, 0xf2,
+ 0xb4, 0x33, 0x3d, 0x71, 0x8b, 0xde, 0xfb, 0xbc, 0xcf, 0xdb, 0x7d, 0xfb, 0x7e, 0x39, 0x70, 0x79,
+ 0xef, 0x02, 0xd3, 0x2c, 0xa7, 0xb8, 0xe7, 0x97, 0x08, 0xb5, 0x89, 0x47, 0x58, 0xb1, 0x41, 0xec,
+ 0xb2, 0x43, 0x8b, 0x52, 0x61, 0xb8, 0x56, 0x71, 0xb7, 0xe6, 0xec, 0x9b, 0x8e, 0xed, 0x51, 0xa7,
+ 0x56, 0x6c, 0x9c, 0x2e, 0x11, 0xcf, 0x38, 0x5d, 0xac, 0x10, 0x9b, 0x50, 0xc3, 0x23, 0x65, 0xcd,
+ 0xa5, 0x8e, 0xe7, 0xa0, 0x7c, 0x80, 0xd7, 0x0c, 0xd7, 0xd2, 0x3a, 0xf0, 0x9a, 0xc4, 0xaf, 0x9c,
+ 0xac, 0x58, 0x5e, 0xd5, 0x2f, 0x69, 0xa6, 0x53, 0x2f, 0x56, 0x9c, 0x8a, 0x53, 0x14, 0x66, 0x25,
+ 0x7f, 0x57, 0x7c, 0x89, 0x0f, 0xf1, 0x57, 0x40, 0xb7, 0x72, 0x36, 0x72, 0x5f, 0x37, 0xcc, 0xaa,
+ 0x65, 0x13, 0x7a, 0x50, 0x74, 0xf7, 0x2a, 0x5c, 0xc0, 0x8a, 0x75, 0xe2, 0x19, 0xc5, 0x46, 0xcf,
+ 0x21, 0x56, 0x8a, 0x83, 0xac, 0xa8, 0x6f, 0x7b, 0x56, 0x9d, 0xf4, 0x18, 0xbc, 0x91, 0x64, 0xc0,
+ 0xcc, 0x2a, 0xa9, 0x1b, 0xdd, 0x76, 0xea, 0x4f, 0x0a, 0xac, 0x5e, 0xfa, 0x9c, 0xd4, 0x5d, 0x6f,
+ 0x9b, 0x5a, 0x0e, 0xb5, 0xbc, 0x83, 0x0d, 0xd2, 0x20, 0xb5, 0x75, 0xc7, 0xde, 0xb5, 0x2a, 0x3e,
+ 0x35, 0x3c, 0xcb, 0xb1, 0xd1, 0x2d, 0xc8, 0xd9, 0x4e, 0xdd, 0xb2, 0x0d, 0x2e, 0x37, 0x7d, 0x4a,
+ 0x89, 0x6d, 0x1e, 0xec, 0x54, 0x0d, 0x4a, 0x58, 0x4e, 0x59, 0x55, 0x5e, 0xc9, 0xe8, 0xff, 0x6f,
+ 0x35, 0x0b, 0xb9, 0xad, 0x01, 0x18, 0x3c, 0xd0, 0x1a, 0xbd, 0x03, 0xf3, 0x35, 0x62, 0x97, 0x8d,
+ 0x52, 0x8d, 0x6c, 0x13, 0x6a, 0x12, 0xdb, 0xcb, 0xa5, 0x04, 0xe1, 0x62, 0xab, 0x59, 0x98, 0xdf,
+ 0x88, 0xab, 0x70, 0x37, 0x56, 0xbd, 0x0d, 0xcb, 0x1f, 0xd4, 0x9c, 0xfd, 0x8b, 0x16, 0xf3, 0x2c,
+ 0xbb, 0xe2, 0x5b, 0xac, 0x4a, 0xe8, 0x26, 0xf1, 0xaa, 0x4e, 0x19, 0xbd, 0x07, 0x69, 0xef, 0xc0,
+ 0x25, 0xe2, 0x7c, 0x59, 0xfd, 0xf8, 0xc3, 0x66, 0x61, 0xac, 0xd5, 0x2c, 0xa4, 0x6f, 0x1c, 0xb8,
+ 0xe4, 0x69, 0xb3, 0x70, 0x64, 0x80, 0x19, 0x57, 0x63, 0x61, 0xa8, 0x7e, 0x93, 0x02, 0xe0, 0xa8,
+ 0x1d, 0x11, 0x38, 0x74, 0x17, 0xa6, 0xf8, 0x63, 0x95, 0x0d, 0xcf, 0x10, 0x9c, 0xd3, 0x67, 0x4e,
+ 0x69, 0x51, 0xa6, 0x84, 0x31, 0xd7, 0xdc, 0xbd, 0x0a, 0x17, 0x30, 0x8d, 0xa3, 0xb5, 0xc6, 0x69,
+ 0xed, 0x5a, 0xe9, 0x1e, 0x31, 0xbd, 0x4d, 0xe2, 0x19, 0x3a, 0x92, 0xa7, 0x80, 0x48, 0x86, 0x43,
+ 0x56, 0xb4, 0x0d, 0x69, 0xe6, 0x12, 0x53, 0x04, 0x60, 0xfa, 0x8c, 0xa6, 0x0d, 0xcf, 0x43, 0x2d,
+ 0x3a, 0xdb, 0x8e, 0x4b, 0x4c, 0x7d, 0xa6, 0x7d, 0x43, 0xfe, 0x85, 0x05, 0x13, 0xba, 0x05, 0x13,
+ 0xcc, 0x33, 0x3c, 0x9f, 0xe5, 0xc6, 0x7b, 0x4e, 0x9c, 0xc4, 0x29, 0xec, 0xf4, 0x39, 0xc9, 0x3a,
+ 0x11, 0x7c, 0x63, 0xc9, 0xa7, 0x3e, 0x4e, 0xc1, 0x62, 0x04, 0x5e, 0x77, 0xec, 0xb2, 0x25, 0x32,
+ 0xe5, 0xed, 0x58, 0xd4, 0x8f, 0x75, 0x45, 0x7d, 0xb9, 0x8f, 0x49, 0x14, 0x71, 0xf4, 0x66, 0x78,
+ 0xdc, 0x94, 0x30, 0x3f, 0x1a, 0x77, 0xfe, 0xb4, 0x59, 0x98, 0x0f, 0xcd, 0xe2, 0xe7, 0x41, 0x0d,
+ 0x40, 0x35, 0x83, 0x79, 0x37, 0xa8, 0x61, 0xb3, 0x80, 0xd6, 0xaa, 0x13, 0x79, 0xeb, 0xd7, 0x46,
+ 0x7b, 0x27, 0x6e, 0xa1, 0xaf, 0x48, 0x97, 0x68, 0xa3, 0x87, 0x0d, 0xf7, 0xf1, 0x80, 0x5e, 0x86,
+ 0x09, 0x4a, 0x0c, 0xe6, 0xd8, 0xb9, 0xb4, 0x38, 0x72, 0x18, 0x2f, 0x2c, 0xa4, 0x58, 0x6a, 0xd1,
+ 0xab, 0x30, 0x59, 0x27, 0x8c, 0x19, 0x15, 0x92, 0xcb, 0x08, 0xe0, 0xbc, 0x04, 0x4e, 0x6e, 0x06,
+ 0x62, 0xdc, 0xd6, 0xab, 0x3f, 0x2b, 0x30, 0x17, 0xc5, 0x69, 0xc3, 0x62, 0x1e, 0xba, 0xd3, 0x93,
+ 0x7b, 0xda, 0x68, 0x77, 0xe2, 0xd6, 0x22, 0xf3, 0x16, 0xa4, 0xbb, 0xa9, 0xb6, 0xa4, 0x23, 0xef,
+ 0xae, 0x41, 0xc6, 0xf2, 0x48, 0x9d, 0x47, 0x7d, 0xbc, 0x2b, 0x5c, 0x09, 0x49, 0xa2, 0xcf, 0x4a,
+ 0xda, 0xcc, 0x15, 0x4e, 0x80, 0x03, 0x1e, 0xf5, 0xcf, 0xf1, 0xce, 0x1b, 0xf0, 0x7c, 0x44, 0xdf,
+ 0x2b, 0xb0, 0xe2, 0x0e, 0x6c, 0x30, 0xf2, 0x52, 0xeb, 0x49, 0x9e, 0x07, 0xb7, 0x28, 0x4c, 0x76,
+ 0x09, 0xef, 0x2b, 0x44, 0x57, 0xe5, 0x91, 0x56, 0x86, 0x80, 0x87, 0x1c, 0x05, 0x7d, 0x04, 0xa8,
+ 0x6e, 0x78, 0x3c, 0xa2, 0x95, 0x6d, 0x4a, 0x4c, 0x52, 0xe6, 0xac, 0xb2, 0x29, 0x85, 0xd9, 0xb1,
+ 0xd9, 0x83, 0xc0, 0x7d, 0xac, 0xd0, 0x57, 0x0a, 0x2c, 0x96, 0x7b, 0x9b, 0x8c, 0xcc, 0xcb, 0xf3,
+ 0xa3, 0x04, 0xba, 0x4f, 0x8f, 0xd2, 0x97, 0x5b, 0xcd, 0xc2, 0x62, 0x1f, 0x05, 0xee, 0xe7, 0x0c,
+ 0xdd, 0x81, 0x0c, 0xf5, 0x6b, 0x84, 0xe5, 0xd2, 0xe2, 0x79, 0x13, 0xbd, 0x6e, 0x3b, 0x35, 0xcb,
+ 0x3c, 0xc0, 0xdc, 0xe4, 0x13, 0xcb, 0xab, 0xee, 0xf8, 0xa2, 0x57, 0xb1, 0xe8, 0xad, 0x85, 0x0a,
+ 0x07, 0xa4, 0xea, 0x03, 0x58, 0xe8, 0x6e, 0x1a, 0xa8, 0x02, 0x60, 0xb6, 0xeb, 0x94, 0x0f, 0x08,
+ 0xee, 0xf6, 0xf5, 0xd1, 0xb3, 0x2a, 0xac, 0xf1, 0xa8, 0x5f, 0x86, 0x22, 0x86, 0x3b, 0xa8, 0xd5,
+ 0x53, 0x30, 0x73, 0x99, 0x3a, 0xbe, 0x2b, 0xcf, 0x88, 0x56, 0x21, 0x6d, 0x1b, 0xf5, 0x76, 0xf7,
+ 0x09, 0x3b, 0xe2, 0x96, 0x51, 0x27, 0x58, 0x68, 0xd4, 0xef, 0x14, 0x98, 0xdd, 0xb0, 0xea, 0x96,
+ 0x87, 0x09, 0x73, 0x1d, 0x9b, 0x11, 0x74, 0x2e, 0xd6, 0xb1, 0x8e, 0x76, 0x75, 0xac, 0x43, 0x31,
+ 0x70, 0x47, 0xaf, 0xfa, 0x14, 0x26, 0xef, 0xfb, 0xc4, 0xb7, 0xec, 0x8a, 0xec, 0xd7, 0x67, 0x93,
+ 0x2e, 0x78, 0x3d, 0x80, 0xc7, 0xb2, 0x4d, 0x9f, 0xe6, 0x2d, 0x40, 0x6a, 0x70, 0x9b, 0x51, 0xfd,
+ 0x27, 0x05, 0x47, 0x85, 0x63, 0x52, 0x1e, 0x32, 0x95, 0xef, 0x40, 0xce, 0x60, 0xcc, 0xa7, 0xa4,
+ 0x3c, 0x68, 0x2a, 0xaf, 0xca, 0xdb, 0xe4, 0xd6, 0x06, 0xe0, 0xf0, 0x40, 0x06, 0x74, 0x0f, 0x66,
+ 0x6b, 0x9d, 0x77, 0x97, 0xd7, 0x3c, 0x99, 0x74, 0xcd, 0x58, 0xc0, 0xf4, 0x25, 0x79, 0x82, 0x78,
+ 0xd0, 0x71, 0x9c, 0xba, 0xdf, 0x16, 0x30, 0x3e, 0xfa, 0x16, 0x80, 0xae, 0xc1, 0x52, 0xc9, 0xa1,
+ 0xd4, 0xd9, 0xb7, 0xec, 0x8a, 0xf0, 0xd3, 0x26, 0x49, 0x0b, 0x92, 0xff, 0xb5, 0x9a, 0x85, 0x25,
+ 0xbd, 0x1f, 0x00, 0xf7, 0xb7, 0x53, 0xf7, 0x61, 0x69, 0x8b, 0xf7, 0x14, 0xe6, 0xf8, 0xd4, 0x24,
+ 0x51, 0x41, 0xa0, 0x02, 0x64, 0x1a, 0x84, 0x96, 0x82, 0xa4, 0xce, 0xea, 0x59, 0x5e, 0x0e, 0x1f,
+ 0x73, 0x01, 0x0e, 0xe4, 0xfc, 0x26, 0x76, 0x64, 0x79, 0x13, 0x6f, 0xb0, 0xdc, 0x84, 0x80, 0x8a,
+ 0x9b, 0x6c, 0xc5, 0x55, 0xb8, 0x1b, 0xab, 0x36, 0x53, 0xb0, 0x3c, 0xa0, 0xfe, 0xd0, 0x4d, 0x98,
+ 0x62, 0xf2, 0x6f, 0x59, 0x53, 0xc7, 0x92, 0xde, 0x42, 0xda, 0x46, 0xdd, 0xbf, 0x4d, 0x86, 0x43,
+ 0x2a, 0xe4, 0xc0, 0x2c, 0x95, 0x47, 0x10, 0x3e, 0xe5, 0x14, 0x38, 0x93, 0xc4, 0xdd, 0x1b, 0x9d,
+ 0xe8, 0xb1, 0x71, 0x27, 0x21, 0x8e, 0xf3, 0xa3, 0x07, 0xb0, 0xd0, 0x71, 0xed, 0xc0, 0xe7, 0xb8,
+ 0xf0, 0x79, 0x2e, 0xc9, 0x67, 0xdf, 0x47, 0xd1, 0x73, 0xd2, 0xed, 0xc2, 0x56, 0x17, 0x2d, 0xee,
+ 0x71, 0xa4, 0xfe, 0x9a, 0x82, 0x21, 0x83, 0xe1, 0x05, 0x2c, 0x79, 0x77, 0x63, 0x4b, 0xde, 0xbb,
+ 0xcf, 0x3e, 0xf1, 0x06, 0x2e, 0x7d, 0xd5, 0xae, 0xa5, 0xef, 0xfd, 0xe7, 0xf0, 0x31, 0x7c, 0x09,
+ 0xfc, 0x2b, 0x05, 0x2f, 0x0d, 0x36, 0x8e, 0x96, 0xc2, 0xab, 0xb1, 0x16, 0x7b, 0xbe, 0xab, 0xc5,
+ 0x1e, 0x1b, 0x81, 0xe2, 0xbf, 0x25, 0xb1, 0x6b, 0x49, 0xfc, 0x4d, 0x81, 0xfc, 0xe0, 0xb8, 0xbd,
+ 0x80, 0xa5, 0xf1, 0xb3, 0xf8, 0xd2, 0xf8, 0xd6, 0xb3, 0x27, 0xd9, 0x80, 0x25, 0xf2, 0xf2, 0xb0,
+ 0xdc, 0x0a, 0xd7, 0xbd, 0x11, 0x46, 0xfe, 0x0f, 0xa9, 0x61, 0xa1, 0x12, 0xdb, 0x69, 0xc2, 0xaf,
+ 0x96, 0x98, 0xf5, 0x25, 0x9b, 0x8f, 0x9e, 0x3a, 0x9f, 0x1e, 0x41, 0x42, 0x56, 0x61, 0xb2, 0x16,
+ 0xcc, 0x6a, 0x59, 0xd4, 0x6b, 0x23, 0x8d, 0xc8, 0x61, 0xa3, 0x3d, 0x58, 0x0b, 0x24, 0x0c, 0xb7,
+ 0xe9, 0x51, 0x19, 0x26, 0x88, 0xf8, 0xa9, 0x3e, 0x6a, 0x65, 0x27, 0xfd, 0xb0, 0xd7, 0x81, 0x67,
+ 0x61, 0x80, 0xc2, 0x92, 0x5b, 0xfd, 0x56, 0x81, 0xd5, 0xa4, 0x96, 0x80, 0xf6, 0xfb, 0xac, 0x78,
+ 0xcf, 0xb1, 0xbe, 0x8f, 0xbe, 0xf2, 0xfd, 0xa8, 0xc0, 0xe1, 0x7e, 0x9b, 0x14, 0x2f, 0x32, 0xbe,
+ 0x3e, 0x85, 0xbb, 0x4f, 0x58, 0x64, 0xd7, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x01, 0x53, 0x55, 0xc3,
+ 0x2e, 0xef, 0x58, 0x5f, 0xb4, 0xb7, 0xfa, 0x30, 0xcd, 0x3f, 0x94, 0x72, 0x1c, 0x22, 0xd0, 0x45,
+ 0x58, 0x10, 0x76, 0x1b, 0xc4, 0xae, 0x78, 0x55, 0xf1, 0x22, 0x72, 0x35, 0x09, 0xa7, 0xce, 0xf5,
+ 0x2e, 0x3d, 0xee, 0xb1, 0x50, 0xff, 0x56, 0x00, 0x3d, 0xcb, 0x36, 0x71, 0x1c, 0xb2, 0x86, 0x6b,
+ 0x89, 0x15, 0x37, 0x28, 0xb4, 0xac, 0x3e, 0xdb, 0x6a, 0x16, 0xb2, 0x6b, 0xdb, 0x57, 0x02, 0x21,
+ 0x8e, 0xf4, 0x1c, 0xdc, 0x1e, 0xb4, 0xc1, 0x40, 0x95, 0xe0, 0xb6, 0x63, 0x86, 0x23, 0x3d, 0xba,
+ 0x00, 0x33, 0x66, 0xcd, 0x67, 0x1e, 0xa1, 0x3b, 0xa6, 0xe3, 0x12, 0xd1, 0x98, 0xa6, 0xf4, 0xc3,
+ 0xf2, 0x4e, 0x33, 0xeb, 0x1d, 0x3a, 0x1c, 0x43, 0x22, 0x0d, 0x80, 0x97, 0x15, 0x73, 0x0d, 0xee,
+ 0x27, 0x23, 0xfc, 0xcc, 0xf1, 0x07, 0xdb, 0x0a, 0xa5, 0xb8, 0x03, 0xa1, 0xde, 0x83, 0xa5, 0x1d,
+ 0x42, 0x1b, 0x96, 0x49, 0xd6, 0x4c, 0xd3, 0xf1, 0x6d, 0xaf, 0xbd, 0xac, 0x17, 0x21, 0x1b, 0xc2,
+ 0x64, 0xe5, 0x1d, 0x92, 0xfe, 0xb3, 0x21, 0x17, 0x8e, 0x30, 0x61, 0xa9, 0xa7, 0x06, 0x96, 0xfa,
+ 0x2f, 0x29, 0x98, 0x8c, 0xe8, 0xd3, 0x7b, 0x96, 0x5d, 0x96, 0xcc, 0x47, 0xda, 0xe8, 0xab, 0x96,
+ 0x5d, 0x7e, 0xda, 0x2c, 0x4c, 0x4b, 0x18, 0xff, 0xc4, 0x02, 0x88, 0xae, 0x40, 0xda, 0x67, 0x84,
+ 0xca, 0x22, 0x3e, 0x9e, 0x94, 0xcc, 0x37, 0x19, 0xa1, 0xed, 0xfd, 0x6a, 0x8a, 0x33, 0x73, 0x01,
+ 0x16, 0x14, 0x68, 0x13, 0x32, 0x15, 0xfe, 0x28, 0xb2, 0x4e, 0x4f, 0x24, 0x71, 0x75, 0xfe, 0x88,
+ 0x09, 0xd2, 0x40, 0x48, 0x70, 0xc0, 0x82, 0xee, 0xc3, 0x1c, 0x8b, 0x85, 0x50, 0x3c, 0xd7, 0x08,
+ 0xfb, 0x52, 0xdf, 0xc0, 0xeb, 0xa8, 0xd5, 0x2c, 0xcc, 0xc5, 0x55, 0xb8, 0xcb, 0x81, 0x5a, 0x84,
+ 0xe9, 0x8e, 0x0b, 0x26, 0x77, 0x59, 0xfd, 0xe2, 0xc3, 0x27, 0xf9, 0xb1, 0x47, 0x4f, 0xf2, 0x63,
+ 0x8f, 0x9f, 0xe4, 0xc7, 0xbe, 0x6c, 0xe5, 0x95, 0x87, 0xad, 0xbc, 0xf2, 0xa8, 0x95, 0x57, 0x1e,
+ 0xb7, 0xf2, 0xca, 0xef, 0xad, 0xbc, 0xf2, 0xf5, 0x1f, 0xf9, 0xb1, 0xdb, 0xf9, 0xe1, 0xff, 0x8b,
+ 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x3a, 0xda, 0x82, 0x48, 0xc5, 0x15, 0x00, 0x00,
+}
+
+func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.LendablePercent != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.NominalConcurrencyShares != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
}
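The hard-coded tag bytes in the generated marshalling above (0x8 and 0x10 here, and 0x1a for the new `exempt` field of PriorityLevelConfigurationSpec further down) follow the standard protobuf key encoding, key = field_number<<3 | wire_type. A throwaway Go check of those values, included only as a sanity aid and not part of the patch:

package main

import "fmt"

// Reproduces the protobuf key bytes used by the generated marshal code.
func main() {
	key := func(fieldNum, wireType int) byte { return byte(fieldNum<<3 | wireType) }
	fmt.Printf("nominalConcurrencyShares (field 1, varint): %#x\n", key(1, 0)) // 0x8
	fmt.Printf("lendablePercent (field 2, varint): %#x\n", key(2, 0))          // 0x10
	fmt.Printf("exempt (field 3, length-delimited): %#x\n", key(3, 2))         // 0x1a
}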
func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) {
@@ -1491,6 +1556,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if m.Exempt != nil {
+ {
+ size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
if m.Limited != nil {
{
size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i])
@@ -1783,6 +1860,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
+func (m *ExemptPriorityLevelConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NominalConcurrencyShares != nil {
+ n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares))
+ }
+ if m.LendablePercent != nil {
+ n += 1 + sovGenerated(uint64(*m.LendablePercent))
+ }
+ return n
+}
+
func (m *FlowDistinguisherMethod) Size() (n int) {
if m == nil {
return 0
@@ -2048,6 +2140,10 @@ func (m *PriorityLevelConfigurationSpec) Size() (n int) {
l = m.Limited.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.Exempt != nil {
+ l = m.Exempt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -2165,6 +2261,17 @@ func sovGenerated(x uint64) (n int) {
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (this *ExemptPriorityLevelConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`,
+ `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`,
+ `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *FlowDistinguisherMethod) String() string {
if this == nil {
return "nil"
@@ -2381,6 +2488,7 @@ func (this *PriorityLevelConfigurationSpec) String() string {
s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`,
+ `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`,
`}`,
}, "")
return s
@@ -2468,6 +2576,96 @@ func valueToStringGenerated(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
+func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NominalConcurrencyShares = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LendablePercent = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -4547,6 +4745,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Exempt == nil {
+ m.Exempt = &ExemptPriorityLevelConfiguration{}
+ }
+ if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
index 98bfabe9c6..96df0ace79 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
@@ -28,6 +28,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/flowcontrol/v1beta1";
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
+message ExemptPriorityLevelConfiguration {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
+ // +optional
+ optional int32 nominalConcurrencyShares = 1;
+
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+  // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ //
+ // +optional
+ optional int32 lendablePercent = 2;
+}
+
// FlowDistinguisherMethod specifies the method of a flow distinguisher.
message FlowDistinguisherMethod {
// `type` is the type of flow distinguisher method
@@ -332,6 +366,14 @@ message PriorityLevelConfigurationSpec {
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
optional LimitedPriorityLevelConfiguration limited = 2;
+
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ // +optional
+ optional ExemptPriorityLevelConfiguration exempt = 3;
}
// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go
index c3b7f607a7..9e05ff1a09 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go
@@ -77,7 +77,9 @@ const (
// is a boolean false or has an invalid boolean representation
// (if the cluster operator sets it to 'false' it will be stomped)
// - any changes to the spec made by the cluster operator will be
- // stomped.
+ // stomped, except for changes to the `nominalConcurrencyShares`
+ // and `lendablePercent` fields of the PriorityLevelConfiguration
+ // named "exempt".
//
// The kube-apiserver will apply updates on the suggested configuration if:
// - the cluster operator has enabled auto-update by setting the annotation
@@ -435,6 +437,14 @@ type PriorityLevelConfigurationSpec struct {
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"`
+
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ // +optional
+ Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"`
}
// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level
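Given the MUST/MAY rules spelled out in the `exempt` field comment above, a spec that exercises the new field sets `type` to "Exempt" and leaves `limited` nil. A minimal sketch, assuming the vendored k8s.io/api/flowcontrol/v1beta1 package and arbitrary field values:

package main

import (
	"fmt"

	flowcontrol "k8s.io/api/flowcontrol/v1beta1"
)

func int32Ptr(i int32) *int32 { return &i }

// Sketch only: a PriorityLevelConfigurationSpec for an exempt level that
// opts into the new nominalConcurrencyShares/lendablePercent knobs.
func main() {
	spec := flowcontrol.PriorityLevelConfigurationSpec{
		Type: flowcontrol.PriorityLevelEnablementExempt, // `limited` must stay empty for this type
		Exempt: &flowcontrol.ExemptPriorityLevelConfiguration{
			NominalConcurrencyShares: int32Ptr(5),
			LendablePercent:          int32Ptr(50),
		},
	}
	fmt.Println(spec.Type, *spec.Exempt.NominalConcurrencyShares, *spec.Exempt.LendablePercent)
}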
@@ -505,6 +515,43 @@ type LimitedPriorityLevelConfiguration struct {
BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"`
}
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
+type ExemptPriorityLevelConfiguration struct {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
+ // +optional
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+	// can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ //
+ // +optional
+ LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"`
+ // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`.
+ // In other words, an exempt priority level
+ // has no meaningful limit on how much it borrows.
+ // There is no explicit representation of that here.
+}
+
// LimitResponse defines how to handle requests that can not be executed right now.
// +union
type LimitResponse struct {
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
index fc08e128db..1405f3c3ca 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
@@ -27,6 +27,16 @@ package v1beta1
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ExemptPriorityLevelConfiguration = map[string]string{
+ "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.",
+ "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.",
+ "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )",
+}
+
+func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string {
+ return map_ExemptPriorityLevelConfiguration
+}
+
var map_FlowDistinguisherMethod = map[string]string{
"": "FlowDistinguisherMethod specifies the method of a flow distinguisher.",
"type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.",
@@ -190,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{
"": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.",
"type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.",
"limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.",
+ "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.",
}
func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go
index 027c3057f8..965d5e55a3 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.deepcopy.go
@@ -25,6 +25,32 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) {
+ *out = *in
+ if in.NominalConcurrencyShares != nil {
+ in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares
+ *out = new(int32)
+ **out = **in
+ }
+ if in.LendablePercent != nil {
+ in, out := &in.LendablePercent, &out.LendablePercent
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration.
+func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ExemptPriorityLevelConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) {
*out = *in
@@ -400,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu
*out = new(LimitedPriorityLevelConfiguration)
(*in).DeepCopyInto(*out)
}
+ if in.Exempt != nil {
+ in, out := &in.Exempt, &out.Exempt
+ *out = new(ExemptPriorityLevelConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
return
}
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go
index b4c8f958f1..7f8ee08506 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go
@@ -43,10 +43,38 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
+func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
+func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_ed300aa8e672704e, []int{0}
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src)
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
+
func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
func (*FlowDistinguisherMethod) ProtoMessage() {}
func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{0}
+ return fileDescriptor_ed300aa8e672704e, []int{1}
}
func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
func (m *FlowSchema) Reset() { *m = FlowSchema{} }
func (*FlowSchema) ProtoMessage() {}
func (*FlowSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{1}
+ return fileDescriptor_ed300aa8e672704e, []int{2}
}
func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
func (*FlowSchemaCondition) ProtoMessage() {}
func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{2}
+ return fileDescriptor_ed300aa8e672704e, []int{3}
}
func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
func (*FlowSchemaList) ProtoMessage() {}
func (*FlowSchemaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{3}
+ return fileDescriptor_ed300aa8e672704e, []int{4}
}
func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
func (*FlowSchemaSpec) ProtoMessage() {}
func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{4}
+ return fileDescriptor_ed300aa8e672704e, []int{5}
}
func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
func (*FlowSchemaStatus) ProtoMessage() {}
func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{5}
+ return fileDescriptor_ed300aa8e672704e, []int{6}
}
func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
func (m *GroupSubject) Reset() { *m = GroupSubject{} }
func (*GroupSubject) ProtoMessage() {}
func (*GroupSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{6}
+ return fileDescriptor_ed300aa8e672704e, []int{7}
}
func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
func (m *LimitResponse) Reset() { *m = LimitResponse{} }
func (*LimitResponse) ProtoMessage() {}
func (*LimitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{7}
+ return fileDescriptor_ed300aa8e672704e, []int{8}
}
func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{8}
+ return fileDescriptor_ed300aa8e672704e, []int{9}
}
func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
func (*NonResourcePolicyRule) ProtoMessage() {}
func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{9}
+ return fileDescriptor_ed300aa8e672704e, []int{10}
}
func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
func (*PolicyRulesWithSubjects) ProtoMessage() {}
func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{10}
+ return fileDescriptor_ed300aa8e672704e, []int{11}
}
func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
func (*PriorityLevelConfiguration) ProtoMessage() {}
func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{11}
+ return fileDescriptor_ed300aa8e672704e, []int{12}
}
func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{12}
+ return fileDescriptor_ed300aa8e672704e, []int{13}
}
func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf
func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
func (*PriorityLevelConfigurationList) ProtoMessage() {}
func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{13}
+ return fileDescriptor_ed300aa8e672704e, []int{14}
}
func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
func (*PriorityLevelConfigurationReference) ProtoMessage() {}
func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{14}
+ return fileDescriptor_ed300aa8e672704e, []int{15}
}
func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf
func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{15}
+ return fileDescriptor_ed300aa8e672704e, []int{16}
}
func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{16}
+ return fileDescriptor_ed300aa8e672704e, []int{17}
}
func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
func (*QueuingConfiguration) ProtoMessage() {}
func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{17}
+ return fileDescriptor_ed300aa8e672704e, []int{18}
}
func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
func (*ResourcePolicyRule) ProtoMessage() {}
func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{18}
+ return fileDescriptor_ed300aa8e672704e, []int{19}
}
func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
func (*ServiceAccountSubject) ProtoMessage() {}
func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{19}
+ return fileDescriptor_ed300aa8e672704e, []int{20}
}
func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
func (m *Subject) Reset() { *m = Subject{} }
func (*Subject) ProtoMessage() {}
func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{20}
+ return fileDescriptor_ed300aa8e672704e, []int{21}
}
func (m *Subject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo
func (m *UserSubject) Reset() { *m = UserSubject{} }
func (*UserSubject) ProtoMessage() {}
func (*UserSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed300aa8e672704e, []int{21}
+ return fileDescriptor_ed300aa8e672704e, []int{22}
}
func (m *UserSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() {
var xxx_messageInfo_UserSubject proto.InternalMessageInfo
func init() {
+ proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration")
proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowDistinguisherMethod")
proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchema")
proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaCondition")
@@ -689,105 +718,142 @@ func init() {
}
var fileDescriptor_ed300aa8e672704e = []byte{
- // 1554 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xdb, 0xc6,
- 0x12, 0x37, 0x65, 0xc9, 0xb6, 0xc6, 0x7f, 0xb3, 0x8e, 0x61, 0x3d, 0x07, 0x90, 0x1c, 0x3e, 0xe0,
- 0xe5, 0xbd, 0x97, 0x84, 0x4a, 0xd2, 0xa4, 0x49, 0x5b, 0xf4, 0x8f, 0xe9, 0xb4, 0x69, 0x1a, 0xdb,
- 0x71, 0xd6, 0x49, 0x5b, 0xa4, 0x01, 0x1a, 0x8a, 0x5a, 0x4b, 0x8c, 0x25, 0x92, 0xd9, 0x25, 0x65,
- 0xb8, 0xb9, 0x14, 0xfd, 0x04, 0x3d, 0xb7, 0xc7, 0x1e, 0x7a, 0xef, 0x17, 0xe8, 0xb1, 0x41, 0x4f,
- 0x39, 0xe6, 0xa4, 0x36, 0xea, 0xa9, 0xdf, 0xa0, 0x0d, 0x50, 0xa0, 0xd8, 0xe5, 0x92, 0x14, 0xf5,
- 0x8f, 0x42, 0x02, 0xe4, 0xd4, 0x9b, 0x39, 0xf3, 0x9b, 0xdf, 0xec, 0xcc, 0xce, 0xcc, 0x8e, 0x0c,
- 0xd7, 0x0e, 0xae, 0x30, 0xcd, 0x72, 0xca, 0x07, 0x7e, 0x85, 0x50, 0x9b, 0x78, 0x84, 0x95, 0x5b,
- 0xc4, 0xae, 0x3a, 0xb4, 0x2c, 0x15, 0x86, 0x6b, 0x95, 0xf7, 0x1b, 0xce, 0xa1, 0xe9, 0xd8, 0x1e,
- 0x75, 0x1a, 0xe5, 0xd6, 0xf9, 0x0a, 0xf1, 0x8c, 0x0b, 0xe5, 0x1a, 0xb1, 0x09, 0x35, 0x3c, 0x52,
- 0xd5, 0x5c, 0xea, 0x78, 0x0e, 0x2a, 0x06, 0x78, 0xcd, 0x70, 0x2d, 0xad, 0x0b, 0xaf, 0x49, 0xfc,
- 0xda, 0xd9, 0x9a, 0xe5, 0xd5, 0xfd, 0x8a, 0x66, 0x3a, 0xcd, 0x72, 0xcd, 0xa9, 0x39, 0x65, 0x61,
- 0x56, 0xf1, 0xf7, 0xc5, 0x97, 0xf8, 0x10, 0x7f, 0x05, 0x74, 0x6b, 0x17, 0x63, 0xf7, 0x4d, 0xc3,
- 0xac, 0x5b, 0x36, 0xa1, 0x47, 0x65, 0xf7, 0xa0, 0xc6, 0x05, 0xac, 0xdc, 0x24, 0x9e, 0x51, 0x6e,
- 0x9d, 0xef, 0x3d, 0xc4, 0x5a, 0x79, 0x98, 0x15, 0xf5, 0x6d, 0xcf, 0x6a, 0x92, 0x3e, 0x83, 0xd7,
- 0xd3, 0x0c, 0x98, 0x59, 0x27, 0x4d, 0xa3, 0xd7, 0x4e, 0xbd, 0x0b, 0xab, 0x1f, 0x34, 0x9c, 0xc3,
- 0xab, 0x16, 0xf3, 0x2c, 0xbb, 0xe6, 0x5b, 0xac, 0x4e, 0xe8, 0x36, 0xf1, 0xea, 0x4e, 0x15, 0xbd,
- 0x0b, 0x59, 0xef, 0xc8, 0x25, 0x05, 0x65, 0x5d, 0xf9, 0x6f, 0x5e, 0x3f, 0xfd, 0xb8, 0x5d, 0x9a,
- 0xe8, 0xb4, 0x4b, 0xd9, 0xdb, 0x47, 0x2e, 0x79, 0xde, 0x2e, 0x9d, 0x18, 0x62, 0xc6, 0xd5, 0x58,
- 0x18, 0xaa, 0xdf, 0x64, 0x00, 0x38, 0x6a, 0x4f, 0xb8, 0x46, 0xf7, 0x61, 0x86, 0x87, 0x5b, 0x35,
- 0x3c, 0x43, 0x70, 0xce, 0x5e, 0x38, 0xa7, 0xc5, 0xb9, 0x8e, 0x4e, 0xad, 0xb9, 0x07, 0x35, 0x2e,
- 0x60, 0x1a, 0x47, 0x6b, 0xad, 0xf3, 0xda, 0xcd, 0xca, 0x03, 0x62, 0x7a, 0xdb, 0xc4, 0x33, 0x74,
- 0x24, 0x4f, 0x01, 0xb1, 0x0c, 0x47, 0xac, 0x68, 0x17, 0xb2, 0xcc, 0x25, 0x66, 0x21, 0x23, 0xd8,
- 0x35, 0x6d, 0xf4, 0x4d, 0x6a, 0xf1, 0xd9, 0xf6, 0x5c, 0x62, 0xea, 0x73, 0x61, 0x84, 0xfc, 0x0b,
- 0x0b, 0x26, 0xf4, 0x29, 0x4c, 0x31, 0xcf, 0xf0, 0x7c, 0x56, 0x98, 0xec, 0x3b, 0x71, 0x1a, 0xa7,
- 0xb0, 0xd3, 0x17, 0x24, 0xeb, 0x54, 0xf0, 0x8d, 0x25, 0x9f, 0xfa, 0x34, 0x03, 0xcb, 0x31, 0x78,
- 0xd3, 0xb1, 0xab, 0x96, 0x67, 0x39, 0x36, 0x7a, 0x2b, 0x91, 0xf5, 0x53, 0x3d, 0x59, 0x5f, 0x1d,
- 0x60, 0x12, 0x67, 0x1c, 0xbd, 0x11, 0x1d, 0x37, 0x23, 0xcc, 0x4f, 0x26, 0x9d, 0x3f, 0x6f, 0x97,
- 0x16, 0x23, 0xb3, 0xe4, 0x79, 0x50, 0x0b, 0x50, 0xc3, 0x60, 0xde, 0x6d, 0x6a, 0xd8, 0x2c, 0xa0,
- 0xb5, 0x9a, 0x44, 0x46, 0xfd, 0xff, 0xf1, 0xee, 0x89, 0x5b, 0xe8, 0x6b, 0xd2, 0x25, 0xda, 0xea,
- 0x63, 0xc3, 0x03, 0x3c, 0xa0, 0xff, 0xc0, 0x14, 0x25, 0x06, 0x73, 0xec, 0x42, 0x56, 0x1c, 0x39,
- 0xca, 0x17, 0x16, 0x52, 0x2c, 0xb5, 0xe8, 0x7f, 0x30, 0xdd, 0x24, 0x8c, 0x19, 0x35, 0x52, 0xc8,
- 0x09, 0xe0, 0xa2, 0x04, 0x4e, 0x6f, 0x07, 0x62, 0x1c, 0xea, 0xd5, 0x1f, 0x15, 0x58, 0x88, 0xf3,
- 0xb4, 0x65, 0x31, 0x0f, 0xdd, 0xeb, 0xab, 0x3d, 0x6d, 0xbc, 0x98, 0xb8, 0xb5, 0xa8, 0xbc, 0x25,
- 0xe9, 0x6e, 0x26, 0x94, 0x74, 0xd5, 0xdd, 0x4d, 0xc8, 0x59, 0x1e, 0x69, 0xf2, 0xac, 0x4f, 0xf6,
- 0xa4, 0x2b, 0xa5, 0x48, 0xf4, 0x79, 0x49, 0x9b, 0xbb, 0xce, 0x09, 0x70, 0xc0, 0xa3, 0xfe, 0x3e,
- 0xd9, 0x1d, 0x01, 0xaf, 0x47, 0xf4, 0xbd, 0x02, 0x6b, 0x2e, 0xb5, 0x1c, 0x6a, 0x79, 0x47, 0x5b,
- 0xa4, 0x45, 0x1a, 0x9b, 0x8e, 0xbd, 0x6f, 0xd5, 0x7c, 0x6a, 0xf0, 0x54, 0xca, 0xa0, 0x36, 0xd3,
- 0x3c, 0xef, 0x0e, 0x65, 0xc0, 0x64, 0x9f, 0x50, 0x62, 0x9b, 0x44, 0x57, 0xe5, 0x91, 0xd6, 0x46,
- 0x80, 0x47, 0x1c, 0x05, 0x7d, 0x04, 0xa8, 0x69, 0x78, 0x3c, 0xa3, 0xb5, 0x5d, 0x4a, 0x4c, 0x52,
- 0xe5, 0xac, 0xa2, 0x20, 0x73, 0x71, 0x75, 0x6c, 0xf7, 0x21, 0xf0, 0x00, 0x2b, 0xf4, 0x95, 0x02,
- 0xcb, 0xd5, 0xfe, 0x21, 0x23, 0xeb, 0xf2, 0xf2, 0x38, 0x89, 0x1e, 0x30, 0xa3, 0xf4, 0xd5, 0x4e,
- 0xbb, 0xb4, 0x3c, 0x40, 0x81, 0x07, 0x39, 0x43, 0xf7, 0x20, 0x47, 0xfd, 0x06, 0x61, 0x85, 0xac,
- 0xb8, 0xde, 0x54, 0xaf, 0xbb, 0x4e, 0xc3, 0x32, 0x8f, 0x30, 0x37, 0xf9, 0xc4, 0xf2, 0xea, 0x7b,
- 0xbe, 0x98, 0x55, 0x2c, 0xbe, 0x6b, 0xa1, 0xc2, 0x01, 0xa9, 0xfa, 0x08, 0x96, 0x7a, 0x87, 0x06,
- 0xaa, 0x01, 0x98, 0x61, 0x9f, 0xb2, 0x82, 0x22, 0xdc, 0xbe, 0x36, 0x7e, 0x55, 0x45, 0x3d, 0x1e,
- 0xcf, 0xcb, 0x48, 0xc4, 0x70, 0x17, 0xb5, 0x7a, 0x0e, 0xe6, 0xae, 0x51, 0xc7, 0x77, 0xe5, 0x19,
- 0xd1, 0x3a, 0x64, 0x6d, 0xa3, 0x19, 0x4e, 0x9f, 0x68, 0x22, 0xee, 0x18, 0x4d, 0x82, 0x85, 0x46,
- 0xfd, 0x4e, 0x81, 0xf9, 0x2d, 0xab, 0x69, 0x79, 0x98, 0x30, 0xd7, 0xb1, 0x19, 0x41, 0x97, 0x12,
- 0x13, 0xeb, 0x64, 0xcf, 0xc4, 0x3a, 0x96, 0x00, 0x77, 0xcd, 0xaa, 0xcf, 0x60, 0xfa, 0xa1, 0x4f,
- 0x7c, 0xcb, 0xae, 0xc9, 0x79, 0x7d, 0x31, 0x2d, 0xc0, 0x5b, 0x01, 0x3c, 0x51, 0x6d, 0xfa, 0x2c,
- 0x1f, 0x01, 0x52, 0x83, 0x43, 0x46, 0xf5, 0xaf, 0x0c, 0x9c, 0x14, 0x8e, 0x49, 0x75, 0x78, 0x15,
- 0xa3, 0x7b, 0x50, 0x30, 0x18, 0xf3, 0x29, 0xa9, 0x6e, 0x3a, 0xb6, 0xe9, 0x53, 0x5e, 0xff, 0x47,
- 0x7b, 0x75, 0x83, 0x12, 0x26, 0xa2, 0xc9, 0xe9, 0xeb, 0x32, 0x9a, 0xc2, 0xc6, 0x10, 0x1c, 0x1e,
- 0xca, 0x80, 0x1e, 0xc0, 0x7c, 0xa3, 0x3b, 0x76, 0x19, 0xe6, 0xd9, 0xb4, 0x30, 0x13, 0x09, 0xd3,
- 0x57, 0xe4, 0x09, 0x92, 0x49, 0xc7, 0x49, 0x6a, 0xf4, 0x36, 0x2c, 0x36, 0x88, 0x5d, 0x35, 0x2a,
- 0x0d, 0xb2, 0x4b, 0xa8, 0x49, 0x6c, 0x4f, 0xb4, 0x48, 0x4e, 0x5f, 0xee, 0xb4, 0x4b, 0x8b, 0x5b,
- 0x49, 0x15, 0xee, 0xc5, 0xa2, 0x9b, 0xb0, 0x52, 0x71, 0x28, 0x75, 0x0e, 0x2d, 0xbb, 0x26, 0xfc,
- 0x84, 0x24, 0x59, 0x41, 0xf2, 0xaf, 0x4e, 0xbb, 0xb4, 0xa2, 0x0f, 0x02, 0xe0, 0xc1, 0x76, 0xea,
- 0x21, 0xac, 0xec, 0xf0, 0x99, 0xc2, 0x1c, 0x9f, 0x9a, 0x24, 0x6e, 0x08, 0x54, 0x82, 0x5c, 0x8b,
- 0xd0, 0x4a, 0x50, 0xd4, 0x79, 0x3d, 0xcf, 0xdb, 0xe1, 0x63, 0x2e, 0xc0, 0x81, 0x9c, 0x47, 0x62,
- 0xc7, 0x96, 0x77, 0xf0, 0x16, 0x2b, 0x4c, 0x09, 0xa8, 0x88, 0x64, 0x27, 0xa9, 0xc2, 0xbd, 0x58,
- 0xb5, 0x9d, 0x81, 0xd5, 0x21, 0xfd, 0x87, 0xee, 0xc0, 0x0c, 0x93, 0x7f, 0xcb, 0x9e, 0x3a, 0x95,
- 0x76, 0x17, 0xd2, 0x36, 0x9e, 0xfe, 0x21, 0x19, 0x8e, 0xa8, 0x90, 0x03, 0xf3, 0x54, 0x1e, 0x41,
- 0xf8, 0x94, 0xaf, 0xc0, 0x85, 0x34, 0xee, 0xfe, 0xec, 0xc4, 0x97, 0x8d, 0xbb, 0x09, 0x71, 0x92,
- 0x1f, 0x3d, 0x82, 0xa5, 0xae, 0xb0, 0x03, 0x9f, 0x93, 0xc2, 0xe7, 0xa5, 0x34, 0x9f, 0x03, 0x2f,
- 0x45, 0x2f, 0x48, 0xb7, 0x4b, 0x3b, 0x3d, 0xb4, 0xb8, 0xcf, 0x91, 0xfa, 0x73, 0x06, 0x46, 0x3c,
- 0x0c, 0xaf, 0x60, 0xc9, 0xbb, 0x9f, 0x58, 0xf2, 0xde, 0x79, 0xf1, 0x17, 0x6f, 0xe8, 0xd2, 0x57,
- 0xef, 0x59, 0xfa, 0xde, 0x7b, 0x09, 0x1f, 0xa3, 0x97, 0xc0, 0x3f, 0x32, 0xf0, 0xef, 0xe1, 0xc6,
- 0xf1, 0x52, 0x78, 0x23, 0x31, 0x62, 0x2f, 0xf7, 0x8c, 0xd8, 0x53, 0x63, 0x50, 0xfc, 0xb3, 0x24,
- 0xf6, 0x2c, 0x89, 0xbf, 0x28, 0x50, 0x1c, 0x9e, 0xb7, 0x57, 0xb0, 0x34, 0x7e, 0x9e, 0x5c, 0x1a,
- 0xdf, 0x7c, 0xf1, 0x22, 0x1b, 0xb2, 0x44, 0x5e, 0x1b, 0x55, 0x5b, 0xd1, 0xba, 0x37, 0xc6, 0x93,
- 0xff, 0xd3, 0xc8, 0x54, 0x89, 0xed, 0x34, 0xe5, 0x57, 0x4b, 0xc2, 0xfa, 0x7d, 0x9b, 0x3f, 0x3d,
- 0x4d, 0xfe, 0x7a, 0x04, 0x05, 0x59, 0x87, 0xe9, 0x46, 0xf0, 0x56, 0xcb, 0xa6, 0xde, 0x18, 0xeb,
- 0x89, 0x1c, 0xf5, 0xb4, 0x07, 0x6b, 0x81, 0x84, 0xe1, 0x90, 0x5e, 0xfd, 0x56, 0x81, 0xf5, 0xb4,
- 0x66, 0x45, 0x87, 0x03, 0x96, 0xaf, 0x97, 0x58, 0xac, 0xc7, 0x5f, 0xc6, 0x7e, 0x50, 0xe0, 0xf8,
- 0xa0, 0x1d, 0x87, 0x97, 0x3f, 0x5f, 0x6c, 0xa2, 0xad, 0x24, 0x2a, 0xff, 0x5b, 0x42, 0x8a, 0xa5,
- 0x16, 0x9d, 0x81, 0x99, 0xba, 0x61, 0x57, 0xf7, 0xac, 0x2f, 0xc2, 0x7d, 0x3b, 0x2a, 0xc0, 0x0f,
- 0xa5, 0x1c, 0x47, 0x08, 0x74, 0x15, 0x96, 0x84, 0xdd, 0x16, 0xb1, 0x6b, 0x5e, 0x5d, 0xe4, 0x4a,
- 0x2e, 0x0d, 0xd1, 0x7b, 0x70, 0xab, 0x47, 0x8f, 0xfb, 0x2c, 0xd4, 0x3f, 0x15, 0x40, 0x2f, 0xf2,
- 0xce, 0x9f, 0x86, 0xbc, 0xe1, 0x5a, 0x62, 0xf9, 0x0c, 0x5a, 0x20, 0xaf, 0xcf, 0x77, 0xda, 0xa5,
- 0xfc, 0xc6, 0xee, 0xf5, 0x40, 0x88, 0x63, 0x3d, 0x07, 0x87, 0x4f, 0x60, 0xf0, 0xd4, 0x49, 0x70,
- 0xe8, 0x98, 0xe1, 0x58, 0x8f, 0xae, 0xc0, 0x9c, 0xd9, 0xf0, 0x99, 0x47, 0xe8, 0x9e, 0xe9, 0xb8,
- 0x44, 0x8c, 0x8c, 0x19, 0xfd, 0xb8, 0x8c, 0x69, 0x6e, 0xb3, 0x4b, 0x87, 0x13, 0x48, 0xa4, 0x01,
- 0xf0, 0x82, 0x67, 0xae, 0xc1, 0xfd, 0xe4, 0x84, 0x9f, 0x05, 0x7e, 0x61, 0x3b, 0x91, 0x14, 0x77,
- 0x21, 0xd4, 0x07, 0xb0, 0xb2, 0x47, 0x68, 0xcb, 0x32, 0xc9, 0x86, 0x69, 0x3a, 0xbe, 0xed, 0x85,
- 0x6b, 0x74, 0x19, 0xf2, 0x11, 0x4c, 0xf6, 0xc4, 0x31, 0xe9, 0x3f, 0x1f, 0x71, 0xe1, 0x18, 0x13,
- 0x35, 0x61, 0x66, 0x78, 0x13, 0x66, 0x60, 0x3a, 0xa6, 0xcf, 0x1e, 0x58, 0x76, 0x55, 0x32, 0x9f,
- 0x08, 0xd1, 0x37, 0x2c, 0xbb, 0xfa, 0xbc, 0x5d, 0x9a, 0x95, 0x30, 0xfe, 0x89, 0x05, 0x10, 0x5d,
- 0x87, 0xac, 0xcf, 0x08, 0x95, 0xed, 0x75, 0x3a, 0xad, 0x98, 0xef, 0x30, 0x42, 0xc3, 0xcd, 0x67,
- 0x86, 0x33, 0x73, 0x01, 0x16, 0x14, 0x68, 0x1b, 0x72, 0x35, 0x7e, 0x29, 0x72, 0xea, 0x9f, 0x49,
- 0xe3, 0xea, 0xfe, 0x79, 0x11, 0x94, 0x81, 0x90, 0xe0, 0x80, 0x05, 0x3d, 0x84, 0x05, 0x96, 0x48,
- 0xa1, 0xb8, 0xae, 0x31, 0x36, 0x99, 0x81, 0x89, 0xd7, 0x51, 0xa7, 0x5d, 0x5a, 0x48, 0xaa, 0x70,
- 0x8f, 0x03, 0xb5, 0x0c, 0xb3, 0x5d, 0x01, 0xa6, 0xcf, 0x3f, 0xfd, 0xea, 0xe3, 0x67, 0xc5, 0x89,
- 0x27, 0xcf, 0x8a, 0x13, 0x4f, 0x9f, 0x15, 0x27, 0xbe, 0xec, 0x14, 0x95, 0xc7, 0x9d, 0xa2, 0xf2,
- 0xa4, 0x53, 0x54, 0x9e, 0x76, 0x8a, 0xca, 0xaf, 0x9d, 0xa2, 0xf2, 0xf5, 0x6f, 0xc5, 0x89, 0xbb,
- 0xc5, 0xd1, 0xff, 0x67, 0xfc, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x87, 0x72, 0xbf, 0xe2, 0xa1, 0x14,
- 0x00, 0x00,
+ // 1617 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x73, 0x1b, 0xc5,
+ 0x16, 0xf6, 0xc8, 0x92, 0x6d, 0x1d, 0x3f, 0xd3, 0x8e, 0xcb, 0xba, 0xce, 0x2d, 0xc9, 0x99, 0x5b,
+ 0x75, 0x73, 0x2f, 0x49, 0x46, 0x89, 0x49, 0x48, 0x80, 0xe2, 0xe1, 0x71, 0x42, 0x08, 0xb1, 0x1d,
+ 0xa7, 0x9d, 0x40, 0x2a, 0xa4, 0x8a, 0x8c, 0x46, 0x6d, 0x69, 0x62, 0x69, 0x66, 0xd2, 0x3d, 0x23,
+ 0x63, 0xb2, 0xa1, 0xf8, 0x05, 0xac, 0x61, 0xc9, 0x82, 0x15, 0x1b, 0xb6, 0x2c, 0x58, 0x92, 0x62,
+ 0x95, 0x65, 0x56, 0x82, 0x88, 0x15, 0xff, 0x00, 0x52, 0x45, 0x15, 0xd5, 0x3d, 0xad, 0x19, 0x8d,
+ 0x5e, 0xa3, 0x4a, 0xaa, 0xb2, 0x62, 0xe7, 0x39, 0xe7, 0x3b, 0xdf, 0xe9, 0x3e, 0x7d, 0x5e, 0x32,
+ 0x5c, 0xd9, 0xbf, 0xc8, 0x34, 0xcb, 0x29, 0xee, 0xfb, 0x25, 0x42, 0x6d, 0xe2, 0x11, 0x56, 0x6c,
+ 0x10, 0xbb, 0xec, 0xd0, 0xa2, 0x54, 0x18, 0xae, 0x55, 0xdc, 0xab, 0x39, 0x07, 0xa6, 0x63, 0x7b,
+ 0xd4, 0xa9, 0x15, 0x1b, 0x67, 0x4b, 0xc4, 0x33, 0xd6, 0x8a, 0x15, 0x62, 0x13, 0x6a, 0x78, 0xa4,
+ 0xac, 0xb9, 0xd4, 0xf1, 0x1c, 0x94, 0x0f, 0xf0, 0x9a, 0xe1, 0x5a, 0x5a, 0x07, 0x5e, 0x93, 0xf8,
+ 0x95, 0xd3, 0x15, 0xcb, 0xab, 0xfa, 0x25, 0xcd, 0x74, 0xea, 0xc5, 0x8a, 0x53, 0x71, 0x8a, 0xc2,
+ 0xac, 0xe4, 0xef, 0x89, 0x2f, 0xf1, 0x21, 0xfe, 0x0a, 0xe8, 0x56, 0xce, 0x45, 0xee, 0xeb, 0x86,
+ 0x59, 0xb5, 0x6c, 0x42, 0x0f, 0x8b, 0xee, 0x7e, 0x85, 0x0b, 0x58, 0xb1, 0x4e, 0x3c, 0xa3, 0xd8,
+ 0x38, 0xdb, 0x7d, 0x88, 0x95, 0xe2, 0x20, 0x2b, 0xea, 0xdb, 0x9e, 0x55, 0x27, 0x3d, 0x06, 0xaf,
+ 0x25, 0x19, 0x30, 0xb3, 0x4a, 0xea, 0x46, 0xb7, 0x9d, 0xfa, 0x83, 0x02, 0xab, 0x97, 0x3f, 0x25,
+ 0x75, 0xd7, 0xdb, 0xa1, 0x96, 0x43, 0x2d, 0xef, 0x70, 0x93, 0x34, 0x48, 0x6d, 0xc3, 0xb1, 0xf7,
+ 0xac, 0x8a, 0x4f, 0x0d, 0xcf, 0x72, 0x6c, 0x74, 0x1b, 0x72, 0xb6, 0x53, 0xb7, 0x6c, 0x83, 0xcb,
+ 0x4d, 0x9f, 0x52, 0x62, 0x9b, 0x87, 0xbb, 0x55, 0x83, 0x12, 0x96, 0x53, 0x56, 0x95, 0xff, 0x65,
+ 0xf4, 0x7f, 0xb7, 0x9a, 0x85, 0xdc, 0xf6, 0x00, 0x0c, 0x1e, 0x68, 0x8d, 0xde, 0x82, 0xf9, 0x1a,
+ 0xb1, 0xcb, 0x46, 0xa9, 0x46, 0x76, 0x08, 0x35, 0x89, 0xed, 0xe5, 0x52, 0x82, 0x70, 0xb1, 0xd5,
+ 0x2c, 0xcc, 0x6f, 0xc6, 0x55, 0xb8, 0x1b, 0xab, 0xde, 0x81, 0xe5, 0xf7, 0x6a, 0xce, 0xc1, 0x25,
+ 0x8b, 0x79, 0x96, 0x5d, 0xf1, 0x2d, 0x56, 0x25, 0x74, 0x8b, 0x78, 0x55, 0xa7, 0x8c, 0xde, 0x81,
+ 0xb4, 0x77, 0xe8, 0x12, 0x71, 0xbe, 0xac, 0x7e, 0xf2, 0x51, 0xb3, 0x30, 0xd6, 0x6a, 0x16, 0xd2,
+ 0x37, 0x0f, 0x5d, 0xf2, 0xac, 0x59, 0x38, 0x36, 0xc0, 0x8c, 0xab, 0xb1, 0x30, 0x54, 0xbf, 0x4a,
+ 0x01, 0x70, 0xd4, 0xae, 0x08, 0x1c, 0xba, 0x07, 0x53, 0xfc, 0xb1, 0xca, 0x86, 0x67, 0x08, 0xce,
+ 0xe9, 0xb5, 0x33, 0x5a, 0x94, 0x29, 0x61, 0xcc, 0x35, 0x77, 0xbf, 0xc2, 0x05, 0x4c, 0xe3, 0x68,
+ 0xad, 0x71, 0x56, 0xbb, 0x5e, 0xba, 0x4f, 0x4c, 0x6f, 0x8b, 0x78, 0x86, 0x8e, 0xe4, 0x29, 0x20,
+ 0x92, 0xe1, 0x90, 0x15, 0xed, 0x40, 0x9a, 0xb9, 0xc4, 0x14, 0x01, 0x98, 0x5e, 0xd3, 0xb4, 0xe1,
+ 0x79, 0xa8, 0x45, 0x67, 0xdb, 0x75, 0x89, 0xa9, 0xcf, 0xb4, 0x6f, 0xc8, 0xbf, 0xb0, 0x60, 0x42,
+ 0xb7, 0x61, 0x82, 0x79, 0x86, 0xe7, 0xb3, 0xdc, 0x78, 0xcf, 0x89, 0x93, 0x38, 0x85, 0x9d, 0x3e,
+ 0x27, 0x59, 0x27, 0x82, 0x6f, 0x2c, 0xf9, 0xd4, 0x27, 0x29, 0x58, 0x8c, 0xc0, 0x1b, 0x8e, 0x5d,
+ 0xb6, 0x44, 0xa6, 0xbc, 0x19, 0x8b, 0xfa, 0x89, 0xae, 0xa8, 0x2f, 0xf7, 0x31, 0x89, 0x22, 0x8e,
+ 0x5e, 0x0f, 0x8f, 0x9b, 0x12, 0xe6, 0xc7, 0xe3, 0xce, 0x9f, 0x35, 0x0b, 0xf3, 0xa1, 0x59, 0xfc,
+ 0x3c, 0xa8, 0x01, 0xa8, 0x66, 0x30, 0xef, 0x26, 0x35, 0x6c, 0x16, 0xd0, 0x5a, 0x75, 0x22, 0x6f,
+ 0xfd, 0xca, 0x68, 0xef, 0xc4, 0x2d, 0xf4, 0x15, 0xe9, 0x12, 0x6d, 0xf6, 0xb0, 0xe1, 0x3e, 0x1e,
+ 0xd0, 0x7f, 0x61, 0x82, 0x12, 0x83, 0x39, 0x76, 0x2e, 0x2d, 0x8e, 0x1c, 0xc6, 0x0b, 0x0b, 0x29,
+ 0x96, 0x5a, 0xf4, 0x7f, 0x98, 0xac, 0x13, 0xc6, 0x8c, 0x0a, 0xc9, 0x65, 0x04, 0x70, 0x5e, 0x02,
+ 0x27, 0xb7, 0x02, 0x31, 0x6e, 0xeb, 0xd5, 0x1f, 0x15, 0x98, 0x8b, 0xe2, 0xb4, 0x69, 0x31, 0x0f,
+ 0xdd, 0xed, 0xc9, 0x3d, 0x6d, 0xb4, 0x3b, 0x71, 0x6b, 0x91, 0x79, 0x0b, 0xd2, 0xdd, 0x54, 0x5b,
+ 0xd2, 0x91, 0x77, 0xd7, 0x21, 0x63, 0x79, 0xa4, 0xce, 0xa3, 0x3e, 0xde, 0x15, 0xae, 0x84, 0x24,
+ 0xd1, 0x67, 0x25, 0x6d, 0xe6, 0x2a, 0x27, 0xc0, 0x01, 0x8f, 0xfa, 0xfb, 0x78, 0xe7, 0x0d, 0x78,
+ 0x3e, 0xa2, 0x6f, 0x15, 0x58, 0x71, 0x07, 0x36, 0x18, 0x79, 0xa9, 0x8d, 0x24, 0xcf, 0x83, 0x5b,
+ 0x14, 0x26, 0x7b, 0x84, 0xf7, 0x15, 0xa2, 0xab, 0xf2, 0x48, 0x2b, 0x43, 0xc0, 0x43, 0x8e, 0x82,
+ 0x3e, 0x00, 0x54, 0x37, 0x3c, 0x1e, 0xd1, 0xca, 0x0e, 0x25, 0x26, 0x29, 0x73, 0x56, 0xd9, 0x94,
+ 0xc2, 0xec, 0xd8, 0xea, 0x41, 0xe0, 0x3e, 0x56, 0xe8, 0x0b, 0x05, 0x16, 0xcb, 0xbd, 0x4d, 0x46,
+ 0xe6, 0xe5, 0x85, 0x51, 0x02, 0xdd, 0xa7, 0x47, 0xe9, 0xcb, 0xad, 0x66, 0x61, 0xb1, 0x8f, 0x02,
+ 0xf7, 0x73, 0x86, 0xee, 0x42, 0x86, 0xfa, 0x35, 0xc2, 0x72, 0x69, 0xf1, 0xbc, 0x89, 0x5e, 0x77,
+ 0x9c, 0x9a, 0x65, 0x1e, 0x62, 0x6e, 0xf2, 0x91, 0xe5, 0x55, 0x77, 0x7d, 0xd1, 0xab, 0x58, 0xf4,
+ 0xd6, 0x42, 0x85, 0x03, 0x52, 0xf5, 0x21, 0x2c, 0x74, 0x37, 0x0d, 0x54, 0x01, 0x30, 0xdb, 0x75,
+ 0xca, 0x07, 0x04, 0x77, 0xfb, 0xea, 0xe8, 0x59, 0x15, 0xd6, 0x78, 0xd4, 0x2f, 0x43, 0x11, 0xc3,
+ 0x1d, 0xd4, 0xea, 0x19, 0x98, 0xb9, 0x42, 0x1d, 0xdf, 0x95, 0x67, 0x44, 0xab, 0x90, 0xb6, 0x8d,
+ 0x7a, 0xbb, 0xfb, 0x84, 0x1d, 0x71, 0xdb, 0xa8, 0x13, 0x2c, 0x34, 0xea, 0x37, 0x0a, 0xcc, 0x6e,
+ 0x5a, 0x75, 0xcb, 0xc3, 0x84, 0xb9, 0x8e, 0xcd, 0x08, 0x3a, 0x1f, 0xeb, 0x58, 0xc7, 0xbb, 0x3a,
+ 0xd6, 0x91, 0x18, 0xb8, 0xa3, 0x57, 0x7d, 0x0c, 0x93, 0x0f, 0x7c, 0xe2, 0x5b, 0x76, 0x45, 0xf6,
+ 0xeb, 0x73, 0x49, 0x17, 0xbc, 0x11, 0xc0, 0x63, 0xd9, 0xa6, 0x4f, 0xf3, 0x16, 0x20, 0x35, 0xb8,
+ 0xcd, 0xa8, 0xfe, 0x95, 0x82, 0xe3, 0xc2, 0x31, 0x29, 0x0f, 0x99, 0xca, 0x77, 0x21, 0x67, 0x30,
+ 0xe6, 0x53, 0x52, 0x1e, 0x34, 0x95, 0x57, 0xe5, 0x6d, 0x72, 0xeb, 0x03, 0x70, 0x78, 0x20, 0x03,
+ 0xba, 0x0f, 0xb3, 0xb5, 0xce, 0xbb, 0xcb, 0x6b, 0x9e, 0x4e, 0xba, 0x66, 0x2c, 0x60, 0xfa, 0x92,
+ 0x3c, 0x41, 0x3c, 0xe8, 0x38, 0x4e, 0xdd, 0x6f, 0x0b, 0x18, 0x1f, 0x7d, 0x0b, 0x40, 0xd7, 0x61,
+ 0xa9, 0xe4, 0x50, 0xea, 0x1c, 0x58, 0x76, 0x45, 0xf8, 0x69, 0x93, 0xa4, 0x05, 0xc9, 0xbf, 0x5a,
+ 0xcd, 0xc2, 0x92, 0xde, 0x0f, 0x80, 0xfb, 0xdb, 0xa9, 0x07, 0xb0, 0xb4, 0xcd, 0x7b, 0x0a, 0x73,
+ 0x7c, 0x6a, 0x92, 0xa8, 0x20, 0x50, 0x01, 0x32, 0x0d, 0x42, 0x4b, 0x41, 0x52, 0x67, 0xf5, 0x2c,
+ 0x2f, 0x87, 0x0f, 0xb9, 0x00, 0x07, 0x72, 0x7e, 0x13, 0x3b, 0xb2, 0xbc, 0x85, 0x37, 0x59, 0x6e,
+ 0x42, 0x40, 0xc5, 0x4d, 0xb6, 0xe3, 0x2a, 0xdc, 0x8d, 0x55, 0x9b, 0x29, 0x58, 0x1e, 0x50, 0x7f,
+ 0xe8, 0x16, 0x4c, 0x31, 0xf9, 0xb7, 0xac, 0xa9, 0x13, 0x49, 0x6f, 0x21, 0x6d, 0xa3, 0xee, 0xdf,
+ 0x26, 0xc3, 0x21, 0x15, 0x72, 0x60, 0x96, 0xca, 0x23, 0x08, 0x9f, 0x72, 0x0a, 0xac, 0x25, 0x71,
+ 0xf7, 0x46, 0x27, 0x7a, 0x6c, 0xdc, 0x49, 0x88, 0xe3, 0xfc, 0xe8, 0x21, 0x2c, 0x74, 0x5c, 0x3b,
+ 0xf0, 0x39, 0x2e, 0x7c, 0x9e, 0x4f, 0xf2, 0xd9, 0xf7, 0x51, 0xf4, 0x9c, 0x74, 0xbb, 0xb0, 0xdd,
+ 0x45, 0x8b, 0x7b, 0x1c, 0xa9, 0x3f, 0xa7, 0x60, 0xc8, 0x60, 0x78, 0x09, 0x4b, 0xde, 0xbd, 0xd8,
+ 0x92, 0xf7, 0xf6, 0xf3, 0x4f, 0xbc, 0x81, 0x4b, 0x5f, 0xb5, 0x6b, 0xe9, 0x7b, 0xf7, 0x05, 0x7c,
+ 0x0c, 0x5f, 0x02, 0xff, 0x48, 0xc1, 0x7f, 0x06, 0x1b, 0x47, 0x4b, 0xe1, 0xb5, 0x58, 0x8b, 0xbd,
+ 0xd0, 0xd5, 0x62, 0x4f, 0x8c, 0x40, 0xf1, 0xcf, 0x92, 0xd8, 0xb5, 0x24, 0xfe, 0xa2, 0x40, 0x7e,
+ 0x70, 0xdc, 0x5e, 0xc2, 0xd2, 0xf8, 0x49, 0x7c, 0x69, 0x7c, 0xe3, 0xf9, 0x93, 0x6c, 0xc0, 0x12,
+ 0x79, 0x65, 0x58, 0x6e, 0x85, 0xeb, 0xde, 0x08, 0x23, 0xff, 0xbb, 0xd4, 0xb0, 0x50, 0x89, 0xed,
+ 0x34, 0xe1, 0x57, 0x4b, 0xcc, 0xfa, 0xb2, 0xcd, 0x47, 0x4f, 0x9d, 0x4f, 0x8f, 0x20, 0x21, 0xab,
+ 0x30, 0x59, 0x0b, 0x66, 0xb5, 0x2c, 0xea, 0xf5, 0x91, 0x46, 0xe4, 0xb0, 0xd1, 0x1e, 0xac, 0x05,
+ 0x12, 0x86, 0xdb, 0xf4, 0xa8, 0x0c, 0x13, 0x44, 0xfc, 0x54, 0x1f, 0xb5, 0xb2, 0x93, 0x7e, 0xd8,
+ 0xeb, 0xc0, 0xb3, 0x30, 0x40, 0x61, 0xc9, 0xad, 0x7e, 0xad, 0xc0, 0x6a, 0x52, 0x4b, 0x40, 0x07,
+ 0x7d, 0x56, 0xbc, 0x17, 0x58, 0xdf, 0x47, 0x5f, 0xf9, 0xbe, 0x57, 0xe0, 0x68, 0xbf, 0x4d, 0x8a,
+ 0x17, 0x19, 0x5f, 0x9f, 0xc2, 0xdd, 0x27, 0x2c, 0xb2, 0x1b, 0x42, 0x8a, 0xa5, 0x16, 0x9d, 0x82,
+ 0xa9, 0xaa, 0x61, 0x97, 0x77, 0xad, 0xcf, 0xda, 0x5b, 0x7d, 0x98, 0xe6, 0xef, 0x4b, 0x39, 0x0e,
+ 0x11, 0xe8, 0x12, 0x2c, 0x08, 0xbb, 0x4d, 0x62, 0x57, 0xbc, 0xaa, 0x78, 0x11, 0xb9, 0x9a, 0x84,
+ 0x53, 0xe7, 0x46, 0x97, 0x1e, 0xf7, 0x58, 0xa8, 0x7f, 0x2a, 0x80, 0x9e, 0x67, 0x9b, 0x38, 0x09,
+ 0x59, 0xc3, 0xb5, 0xc4, 0x8a, 0x1b, 0x14, 0x5a, 0x56, 0x9f, 0x6d, 0x35, 0x0b, 0xd9, 0xf5, 0x9d,
+ 0xab, 0x81, 0x10, 0x47, 0x7a, 0x0e, 0x6e, 0x0f, 0xda, 0x60, 0xa0, 0x4a, 0x70, 0xdb, 0x31, 0xc3,
+ 0x91, 0x1e, 0x5d, 0x84, 0x19, 0xb3, 0xe6, 0x33, 0x8f, 0xd0, 0x5d, 0xd3, 0x71, 0x89, 0x68, 0x4c,
+ 0x53, 0xfa, 0x51, 0x79, 0xa7, 0x99, 0x8d, 0x0e, 0x1d, 0x8e, 0x21, 0x91, 0x06, 0xc0, 0xcb, 0x8a,
+ 0xb9, 0x06, 0xf7, 0x93, 0x11, 0x7e, 0xe6, 0xf8, 0x83, 0x6d, 0x87, 0x52, 0xdc, 0x81, 0x50, 0xef,
+ 0xc3, 0xd2, 0x2e, 0xa1, 0x0d, 0xcb, 0x24, 0xeb, 0xa6, 0xe9, 0xf8, 0xb6, 0xd7, 0x5e, 0xd6, 0x8b,
+ 0x90, 0x0d, 0x61, 0xb2, 0xf2, 0x8e, 0x48, 0xff, 0xd9, 0x90, 0x0b, 0x47, 0x98, 0xb0, 0xd4, 0x53,
+ 0x03, 0x4b, 0xfd, 0xa7, 0x14, 0x4c, 0x46, 0xf4, 0xe9, 0x7d, 0xcb, 0x2e, 0x4b, 0xe6, 0x63, 0x6d,
+ 0xf4, 0x35, 0xcb, 0x2e, 0x3f, 0x6b, 0x16, 0xa6, 0x25, 0x8c, 0x7f, 0x62, 0x01, 0x44, 0x57, 0x21,
+ 0xed, 0x33, 0x42, 0x65, 0x11, 0x9f, 0x4c, 0x4a, 0xe6, 0x5b, 0x8c, 0xd0, 0xf6, 0x7e, 0x35, 0xc5,
+ 0x99, 0xb9, 0x00, 0x0b, 0x0a, 0xb4, 0x05, 0x99, 0x0a, 0x7f, 0x14, 0x59, 0xa7, 0xa7, 0x92, 0xb8,
+ 0x3a, 0x7f, 0xc4, 0x04, 0x69, 0x20, 0x24, 0x38, 0x60, 0x41, 0x0f, 0x60, 0x8e, 0xc5, 0x42, 0x28,
+ 0x9e, 0x6b, 0x84, 0x7d, 0xa9, 0x6f, 0xe0, 0x75, 0xd4, 0x6a, 0x16, 0xe6, 0xe2, 0x2a, 0xdc, 0xe5,
+ 0x40, 0x2d, 0xc2, 0x74, 0xc7, 0x05, 0x93, 0xbb, 0xac, 0x7e, 0xe9, 0xd1, 0xd3, 0xfc, 0xd8, 0xe3,
+ 0xa7, 0xf9, 0xb1, 0x27, 0x4f, 0xf3, 0x63, 0x9f, 0xb7, 0xf2, 0xca, 0xa3, 0x56, 0x5e, 0x79, 0xdc,
+ 0xca, 0x2b, 0x4f, 0x5a, 0x79, 0xe5, 0xd7, 0x56, 0x5e, 0xf9, 0xf2, 0xb7, 0xfc, 0xd8, 0x9d, 0xfc,
+ 0xf0, 0xff, 0xc5, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x4d, 0x1e, 0x25, 0xc5, 0x15, 0x00,
+ 0x00,
+}
+
+func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.LendablePercent != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.NominalConcurrencyShares != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
}
func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) {
@@ -1491,6 +1557,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if m.Exempt != nil {
+ {
+ size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
if m.Limited != nil {
{
size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i])
@@ -1783,6 +1861,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
+func (m *ExemptPriorityLevelConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NominalConcurrencyShares != nil {
+ n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares))
+ }
+ if m.LendablePercent != nil {
+ n += 1 + sovGenerated(uint64(*m.LendablePercent))
+ }
+ return n
+}
+
func (m *FlowDistinguisherMethod) Size() (n int) {
if m == nil {
return 0
@@ -2048,6 +2141,10 @@ func (m *PriorityLevelConfigurationSpec) Size() (n int) {
l = m.Limited.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.Exempt != nil {
+ l = m.Exempt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -2165,6 +2262,17 @@ func sovGenerated(x uint64) (n int) {
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (this *ExemptPriorityLevelConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`,
+ `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`,
+ `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *FlowDistinguisherMethod) String() string {
if this == nil {
return "nil"
@@ -2381,6 +2489,7 @@ func (this *PriorityLevelConfigurationSpec) String() string {
s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`,
+ `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`,
`}`,
}, "")
return s
@@ -2468,6 +2577,96 @@ func valueToStringGenerated(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
+func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NominalConcurrencyShares = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LendablePercent = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -4547,6 +4746,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Exempt == nil {
+ m.Exempt = &ExemptPriorityLevelConfiguration{}
+ }
+ if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto
index 4c98f21bcf..a8c8a32737 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto
+++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto
@@ -28,6 +28,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/flowcontrol/v1beta2";
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
+message ExemptPriorityLevelConfiguration {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
+ // +optional
+ optional int32 nominalConcurrencyShares = 1;
+
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. This value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ //
+ // +optional
+ optional int32 lendablePercent = 2;
+}
+
// FlowDistinguisherMethod specifies the method of a flow distinguisher.
message FlowDistinguisherMethod {
// `type` is the type of flow distinguisher method
@@ -332,6 +366,14 @@ message PriorityLevelConfigurationSpec {
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
optional LimitedPriorityLevelConfiguration limited = 2;
+
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ // +optional
+ optional ExemptPriorityLevelConfiguration exempt = 3;
}
// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go
index 75409cee3e..e8cf7abfff 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go
@@ -77,7 +77,9 @@ const (
// is a boolean false or has an invalid boolean representation
// (if the cluster operator sets it to 'false' it will be stomped)
// - any changes to the spec made by the cluster operator will be
- // stomped.
+ // stomped, except for changes to the `nominalConcurrencyShares`
+ // and `lendablePercent` fields of the PriorityLevelConfiguration
+ // named "exempt".
//
// The kube-apiserver will apply updates on the suggested configuration if:
// - the cluster operator has enabled auto-update by setting the annotation
@@ -435,6 +437,14 @@ type PriorityLevelConfigurationSpec struct {
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"`
+
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ // +optional
+ Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"`
}
// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level
@@ -505,6 +515,43 @@ type LimitedPriorityLevelConfiguration struct {
BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"`
}
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
+type ExemptPriorityLevelConfiguration struct {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
+ // +optional
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. This value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ //
+ // +optional
+ LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"`
+ // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`.
+ // In other words, an exempt priority level
+ // has no meaningful limit on how much it borrows.
+ // There is no explicit representation of that here.
+}
+
// LimitResponse defines how to handle requests that can not be executed right now.
// +union
type LimitResponse struct {
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go
index b2eff7f96e..49a4178096 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go
@@ -27,6 +27,16 @@ package v1beta2
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ExemptPriorityLevelConfiguration = map[string]string{
+ "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.",
+ "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.",
+ "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )",
+}
+
+func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string {
+ return map_ExemptPriorityLevelConfiguration
+}
+
var map_FlowDistinguisherMethod = map[string]string{
"": "FlowDistinguisherMethod specifies the method of a flow distinguisher.",
"type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.",
@@ -190,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{
"": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.",
"type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.",
"limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.",
+ "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.",
}
func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go
index aa692484c1..e0605b95d7 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.deepcopy.go
@@ -25,6 +25,32 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) {
+ *out = *in
+ if in.NominalConcurrencyShares != nil {
+ in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares
+ *out = new(int32)
+ **out = **in
+ }
+ if in.LendablePercent != nil {
+ in, out := &in.LendablePercent, &out.LendablePercent
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration.
+func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ExemptPriorityLevelConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) {
*out = *in
@@ -400,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu
*out = new(LimitedPriorityLevelConfiguration)
(*in).DeepCopyInto(*out)
}
+ if in.Exempt != nil {
+ in, out := &in.Exempt, &out.Exempt
+ *out = new(ExemptPriorityLevelConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
return
}
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go
index 166e8520b7..c6598306d9 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go
@@ -43,10 +43,38 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
+func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
+func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_803504887082f044, []int{0}
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src)
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_Size() int {
+ return m.Size()
+}
+func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
+
func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
func (*FlowDistinguisherMethod) ProtoMessage() {}
func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{0}
+ return fileDescriptor_803504887082f044, []int{1}
}
func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -74,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
func (m *FlowSchema) Reset() { *m = FlowSchema{} }
func (*FlowSchema) ProtoMessage() {}
func (*FlowSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{1}
+ return fileDescriptor_803504887082f044, []int{2}
}
func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -102,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
func (*FlowSchemaCondition) ProtoMessage() {}
func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{2}
+ return fileDescriptor_803504887082f044, []int{3}
}
func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -130,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
func (*FlowSchemaList) ProtoMessage() {}
func (*FlowSchemaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{3}
+ return fileDescriptor_803504887082f044, []int{4}
}
func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -158,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
func (*FlowSchemaSpec) ProtoMessage() {}
func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{4}
+ return fileDescriptor_803504887082f044, []int{5}
}
func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -186,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
func (*FlowSchemaStatus) ProtoMessage() {}
func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{5}
+ return fileDescriptor_803504887082f044, []int{6}
}
func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -214,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
func (m *GroupSubject) Reset() { *m = GroupSubject{} }
func (*GroupSubject) ProtoMessage() {}
func (*GroupSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{6}
+ return fileDescriptor_803504887082f044, []int{7}
}
func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -242,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
func (m *LimitResponse) Reset() { *m = LimitResponse{} }
func (*LimitResponse) ProtoMessage() {}
func (*LimitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{7}
+ return fileDescriptor_803504887082f044, []int{8}
}
func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -270,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{8}
+ return fileDescriptor_803504887082f044, []int{9}
}
func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -298,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
func (*NonResourcePolicyRule) ProtoMessage() {}
func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{9}
+ return fileDescriptor_803504887082f044, []int{10}
}
func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -326,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
func (*PolicyRulesWithSubjects) ProtoMessage() {}
func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{10}
+ return fileDescriptor_803504887082f044, []int{11}
}
func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -354,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
func (*PriorityLevelConfiguration) ProtoMessage() {}
func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{11}
+ return fileDescriptor_803504887082f044, []int{12}
}
func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -382,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{12}
+ return fileDescriptor_803504887082f044, []int{13}
}
func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -410,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf
func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
func (*PriorityLevelConfigurationList) ProtoMessage() {}
func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{13}
+ return fileDescriptor_803504887082f044, []int{14}
}
func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -438,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
func (*PriorityLevelConfigurationReference) ProtoMessage() {}
func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{14}
+ return fileDescriptor_803504887082f044, []int{15}
}
func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -466,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf
func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{15}
+ return fileDescriptor_803504887082f044, []int{16}
}
func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -494,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{16}
+ return fileDescriptor_803504887082f044, []int{17}
}
func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -522,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
func (*QueuingConfiguration) ProtoMessage() {}
func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{17}
+ return fileDescriptor_803504887082f044, []int{18}
}
func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -550,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
func (*ResourcePolicyRule) ProtoMessage() {}
func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{18}
+ return fileDescriptor_803504887082f044, []int{19}
}
func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -578,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
func (*ServiceAccountSubject) ProtoMessage() {}
func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{19}
+ return fileDescriptor_803504887082f044, []int{20}
}
func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -606,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
func (m *Subject) Reset() { *m = Subject{} }
func (*Subject) ProtoMessage() {}
func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{20}
+ return fileDescriptor_803504887082f044, []int{21}
}
func (m *Subject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -634,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo
func (m *UserSubject) Reset() { *m = UserSubject{} }
func (*UserSubject) ProtoMessage() {}
func (*UserSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_803504887082f044, []int{21}
+ return fileDescriptor_803504887082f044, []int{22}
}
func (m *UserSubject) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -660,6 +688,7 @@ func (m *UserSubject) XXX_DiscardUnknown() {
var xxx_messageInfo_UserSubject proto.InternalMessageInfo
func init() {
+ proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration")
proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowDistinguisherMethod")
proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchema")
proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaCondition")
@@ -689,104 +718,141 @@ func init() {
}
var fileDescriptor_803504887082f044 = []byte{
- // 1552 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0xdb, 0x46,
- 0x13, 0x36, 0x65, 0xc9, 0xb6, 0xd6, 0x9f, 0x59, 0xc7, 0xb0, 0x5e, 0x07, 0x90, 0x1c, 0xbe, 0xc0,
- 0x9b, 0xb7, 0x4d, 0x42, 0xe5, 0xb3, 0x49, 0x5b, 0xf4, 0x23, 0x74, 0xda, 0x34, 0x8d, 0xed, 0x38,
- 0xeb, 0xa4, 0x2d, 0xd2, 0x00, 0x0d, 0x45, 0xad, 0xa9, 0x8d, 0x25, 0x92, 0xd9, 0x25, 0x65, 0xb8,
- 0xb9, 0x14, 0xfd, 0x05, 0x3d, 0xb7, 0xc7, 0x1e, 0x7a, 0xef, 0x1f, 0xe8, 0xb1, 0x41, 0x4f, 0x39,
- 0xe6, 0xa4, 0x36, 0xea, 0xa9, 0xff, 0xa0, 0x0d, 0x50, 0xa0, 0xd8, 0xe5, 0x92, 0x14, 0xa9, 0x0f,
- 0x0a, 0x09, 0x90, 0x53, 0x6f, 0xe6, 0xcc, 0x33, 0xcf, 0xec, 0xcc, 0xce, 0xcc, 0x8e, 0x0c, 0xae,
- 0xed, 0x5f, 0x66, 0x1a, 0x71, 0xaa, 0xfb, 0x7e, 0x0d, 0x53, 0x1b, 0x7b, 0x98, 0x55, 0xdb, 0xd8,
- 0xae, 0x3b, 0xb4, 0x2a, 0x15, 0x86, 0x4b, 0xaa, 0x7b, 0x4d, 0xe7, 0xc0, 0x74, 0x6c, 0x8f, 0x3a,
- 0xcd, 0x6a, 0xfb, 0x6c, 0x0d, 0x7b, 0xc6, 0xf9, 0xaa, 0x85, 0x6d, 0x4c, 0x0d, 0x0f, 0xd7, 0x35,
- 0x97, 0x3a, 0x9e, 0x03, 0xcb, 0x01, 0x5e, 0x33, 0x5c, 0xa2, 0xf5, 0xe0, 0x35, 0x89, 0x5f, 0x3b,
- 0x6d, 0x11, 0xaf, 0xe1, 0xd7, 0x34, 0xd3, 0x69, 0x55, 0x2d, 0xc7, 0x72, 0xaa, 0xc2, 0xac, 0xe6,
- 0xef, 0x89, 0x2f, 0xf1, 0x21, 0xfe, 0x0a, 0xe8, 0xd6, 0x2e, 0xc4, 0xee, 0x5b, 0x86, 0xd9, 0x20,
- 0x36, 0xa6, 0x87, 0x55, 0x77, 0xdf, 0xe2, 0x02, 0x56, 0x6d, 0x61, 0xcf, 0xa8, 0xb6, 0xcf, 0xa6,
- 0x0f, 0xb1, 0x56, 0x1d, 0x66, 0x45, 0x7d, 0xdb, 0x23, 0x2d, 0xdc, 0x67, 0xf0, 0x46, 0x96, 0x01,
- 0x33, 0x1b, 0xb8, 0x65, 0xa4, 0xed, 0xd4, 0xbb, 0x60, 0xf5, 0xc3, 0xa6, 0x73, 0x70, 0x95, 0x30,
- 0x8f, 0xd8, 0x96, 0x4f, 0x58, 0x03, 0xd3, 0x2d, 0xec, 0x35, 0x9c, 0x3a, 0x7c, 0x0f, 0xe4, 0xbd,
- 0x43, 0x17, 0x97, 0x94, 0x75, 0xe5, 0xff, 0x45, 0xfd, 0xe4, 0xe3, 0x4e, 0x65, 0xa2, 0xdb, 0xa9,
- 0xe4, 0x6f, 0x1f, 0xba, 0xf8, 0x79, 0xa7, 0x72, 0x6c, 0x88, 0x19, 0x57, 0x23, 0x61, 0xa8, 0x7e,
- 0x9b, 0x03, 0x80, 0xa3, 0x76, 0x85, 0x6b, 0x78, 0x1f, 0xcc, 0xf0, 0x70, 0xeb, 0x86, 0x67, 0x08,
- 0xce, 0xd9, 0x73, 0x67, 0xb4, 0x38, 0xd7, 0xd1, 0xa9, 0x35, 0x77, 0xdf, 0xe2, 0x02, 0xa6, 0x71,
- 0xb4, 0xd6, 0x3e, 0xab, 0xdd, 0xac, 0x3d, 0xc0, 0xa6, 0xb7, 0x85, 0x3d, 0x43, 0x87, 0xf2, 0x14,
- 0x20, 0x96, 0xa1, 0x88, 0x15, 0xee, 0x80, 0x3c, 0x73, 0xb1, 0x59, 0xca, 0x09, 0x76, 0x4d, 0x1b,
- 0x7d, 0x93, 0x5a, 0x7c, 0xb6, 0x5d, 0x17, 0x9b, 0xfa, 0x5c, 0x18, 0x21, 0xff, 0x42, 0x82, 0x09,
- 0x7e, 0x06, 0xa6, 0x98, 0x67, 0x78, 0x3e, 0x2b, 0x4d, 0xf6, 0x9d, 0x38, 0x8b, 0x53, 0xd8, 0xe9,
- 0x0b, 0x92, 0x75, 0x2a, 0xf8, 0x46, 0x92, 0x4f, 0x7d, 0x9a, 0x03, 0xcb, 0x31, 0x78, 0xc3, 0xb1,
- 0xeb, 0xc4, 0x23, 0x8e, 0x0d, 0xdf, 0x4e, 0x64, 0xfd, 0x44, 0x2a, 0xeb, 0xab, 0x03, 0x4c, 0xe2,
- 0x8c, 0xc3, 0x37, 0xa3, 0xe3, 0xe6, 0x84, 0xf9, 0xf1, 0xa4, 0xf3, 0xe7, 0x9d, 0xca, 0x62, 0x64,
- 0x96, 0x3c, 0x0f, 0x6c, 0x03, 0xd8, 0x34, 0x98, 0x77, 0x9b, 0x1a, 0x36, 0x0b, 0x68, 0x49, 0x0b,
- 0xcb, 0xa8, 0x5f, 0x1f, 0xef, 0x9e, 0xb8, 0x85, 0xbe, 0x26, 0x5d, 0xc2, 0xcd, 0x3e, 0x36, 0x34,
- 0xc0, 0x03, 0xfc, 0x1f, 0x98, 0xa2, 0xd8, 0x60, 0x8e, 0x5d, 0xca, 0x8b, 0x23, 0x47, 0xf9, 0x42,
- 0x42, 0x8a, 0xa4, 0x16, 0xbe, 0x06, 0xa6, 0x5b, 0x98, 0x31, 0xc3, 0xc2, 0xa5, 0x82, 0x00, 0x2e,
- 0x4a, 0xe0, 0xf4, 0x56, 0x20, 0x46, 0xa1, 0x5e, 0xfd, 0x49, 0x01, 0x0b, 0x71, 0x9e, 0x36, 0x09,
- 0xf3, 0xe0, 0xbd, 0xbe, 0xda, 0xd3, 0xc6, 0x8b, 0x89, 0x5b, 0x8b, 0xca, 0x5b, 0x92, 0xee, 0x66,
- 0x42, 0x49, 0x4f, 0xdd, 0xdd, 0x04, 0x05, 0xe2, 0xe1, 0x16, 0xcf, 0xfa, 0x64, 0x2a, 0x5d, 0x19,
- 0x45, 0xa2, 0xcf, 0x4b, 0xda, 0xc2, 0x75, 0x4e, 0x80, 0x02, 0x1e, 0xf5, 0x8f, 0xc9, 0xde, 0x08,
- 0x78, 0x3d, 0xc2, 0x1f, 0x14, 0xb0, 0xe6, 0x52, 0xe2, 0x50, 0xe2, 0x1d, 0x6e, 0xe2, 0x36, 0x6e,
- 0x6e, 0x38, 0xf6, 0x1e, 0xb1, 0x7c, 0x6a, 0xf0, 0x54, 0xca, 0xa0, 0x36, 0xb2, 0x3c, 0xef, 0x0c,
- 0x65, 0x40, 0x78, 0x0f, 0x53, 0x6c, 0x9b, 0x58, 0x57, 0xe5, 0x91, 0xd6, 0x46, 0x80, 0x47, 0x1c,
- 0x05, 0x7e, 0x0c, 0x60, 0xcb, 0xf0, 0x78, 0x46, 0xad, 0x1d, 0x8a, 0x4d, 0x5c, 0xe7, 0xac, 0xa2,
- 0x20, 0x0b, 0x71, 0x75, 0x6c, 0xf5, 0x21, 0xd0, 0x00, 0x2b, 0xf8, 0xb5, 0x02, 0x96, 0xeb, 0xfd,
- 0x43, 0x46, 0xd6, 0xe5, 0xa5, 0x71, 0x12, 0x3d, 0x60, 0x46, 0xe9, 0xab, 0xdd, 0x4e, 0x65, 0x79,
- 0x80, 0x02, 0x0d, 0x72, 0x06, 0xef, 0x81, 0x02, 0xf5, 0x9b, 0x98, 0x95, 0xf2, 0xe2, 0x7a, 0x33,
- 0xbd, 0xee, 0x38, 0x4d, 0x62, 0x1e, 0x22, 0x6e, 0xf2, 0x29, 0xf1, 0x1a, 0xbb, 0xbe, 0x98, 0x55,
- 0x2c, 0xbe, 0x6b, 0xa1, 0x42, 0x01, 0xa9, 0xfa, 0x08, 0x2c, 0xa5, 0x87, 0x06, 0xb4, 0x00, 0x30,
- 0xc3, 0x3e, 0x65, 0x25, 0x45, 0xb8, 0x3d, 0x3f, 0x7e, 0x55, 0x45, 0x3d, 0x1e, 0xcf, 0xcb, 0x48,
- 0xc4, 0x50, 0x0f, 0xb5, 0x7a, 0x06, 0xcc, 0x5d, 0xa3, 0x8e, 0xef, 0xca, 0x33, 0xc2, 0x75, 0x90,
- 0xb7, 0x8d, 0x56, 0x38, 0x7d, 0xa2, 0x89, 0xb8, 0x6d, 0xb4, 0x30, 0x12, 0x1a, 0xf5, 0x7b, 0x05,
- 0xcc, 0x6f, 0x92, 0x16, 0xf1, 0x10, 0x66, 0xae, 0x63, 0x33, 0x0c, 0x2f, 0x26, 0x26, 0xd6, 0xf1,
- 0xd4, 0xc4, 0x3a, 0x92, 0x00, 0xf7, 0xcc, 0xaa, 0xcf, 0xc1, 0xf4, 0x43, 0x1f, 0xfb, 0xc4, 0xb6,
- 0xe4, 0xbc, 0xbe, 0x90, 0x15, 0xe0, 0xad, 0x00, 0x9e, 0xa8, 0x36, 0x7d, 0x96, 0x8f, 0x00, 0xa9,
- 0x41, 0x21, 0xa3, 0xfa, 0x77, 0x0e, 0x1c, 0x17, 0x8e, 0x71, 0x7d, 0x78, 0x15, 0xc3, 0x7b, 0xa0,
- 0x64, 0x3b, 0x2d, 0x62, 0x1b, 0x5c, 0x6e, 0xfa, 0x94, 0xd7, 0xff, 0xe1, 0x6e, 0xc3, 0xa0, 0x98,
- 0x89, 0x68, 0x0a, 0xfa, 0xba, 0x8c, 0xa6, 0xb4, 0x3d, 0x04, 0x87, 0x86, 0x32, 0xc0, 0x07, 0x60,
- 0xbe, 0xd9, 0x1b, 0xbb, 0x0c, 0xf3, 0x74, 0x56, 0x98, 0x89, 0x84, 0xe9, 0x2b, 0xf2, 0x04, 0xc9,
- 0xa4, 0xa3, 0x24, 0x35, 0x7c, 0x07, 0x2c, 0x36, 0xb1, 0x5d, 0x37, 0x6a, 0x4d, 0xbc, 0x83, 0xa9,
- 0x89, 0x6d, 0x4f, 0xb4, 0x48, 0x41, 0x5f, 0xee, 0x76, 0x2a, 0x8b, 0x9b, 0x49, 0x15, 0x4a, 0x63,
- 0xe1, 0x4d, 0xb0, 0x52, 0x73, 0x28, 0x75, 0x0e, 0x88, 0x6d, 0x09, 0x3f, 0x21, 0x49, 0x5e, 0x90,
- 0xfc, 0xa7, 0xdb, 0xa9, 0xac, 0xe8, 0x83, 0x00, 0x68, 0xb0, 0x9d, 0x7a, 0x00, 0x56, 0xb6, 0xf9,
- 0x4c, 0x61, 0x8e, 0x4f, 0x4d, 0x1c, 0x37, 0x04, 0xac, 0x80, 0x42, 0x1b, 0xd3, 0x5a, 0x50, 0xd4,
- 0x45, 0xbd, 0xc8, 0xdb, 0xe1, 0x13, 0x2e, 0x40, 0x81, 0x9c, 0x47, 0x62, 0xc7, 0x96, 0x77, 0xd0,
- 0x26, 0x2b, 0x4d, 0x09, 0xa8, 0x88, 0x64, 0x3b, 0xa9, 0x42, 0x69, 0xac, 0xda, 0xc9, 0x81, 0xd5,
- 0x21, 0xfd, 0x07, 0xef, 0x80, 0x19, 0x26, 0xff, 0x96, 0x3d, 0x75, 0x22, 0xeb, 0x2e, 0xa4, 0x6d,
- 0x3c, 0xfd, 0x43, 0x32, 0x14, 0x51, 0x41, 0x07, 0xcc, 0x53, 0x79, 0x04, 0xe1, 0x53, 0xbe, 0x02,
- 0xe7, 0xb2, 0xb8, 0xfb, 0xb3, 0x13, 0x5f, 0x36, 0xea, 0x25, 0x44, 0x49, 0x7e, 0xf8, 0x08, 0x2c,
- 0xf5, 0x84, 0x1d, 0xf8, 0x9c, 0x14, 0x3e, 0x2f, 0x66, 0xf9, 0x1c, 0x78, 0x29, 0x7a, 0x49, 0xba,
- 0x5d, 0xda, 0x4e, 0xd1, 0xa2, 0x3e, 0x47, 0xea, 0x2f, 0x39, 0x30, 0xe2, 0x61, 0x78, 0x05, 0x4b,
- 0xde, 0xfd, 0xc4, 0x92, 0xf7, 0xee, 0x8b, 0xbf, 0x78, 0x43, 0x97, 0xbe, 0x46, 0x6a, 0xe9, 0x7b,
- 0xff, 0x25, 0x7c, 0x8c, 0x5e, 0x02, 0xff, 0xcc, 0x81, 0xff, 0x0e, 0x37, 0x8e, 0x97, 0xc2, 0x1b,
- 0x89, 0x11, 0x7b, 0x29, 0x35, 0x62, 0x4f, 0x8c, 0x41, 0xf1, 0xef, 0x92, 0x98, 0x5a, 0x12, 0x7f,
- 0x55, 0x40, 0x79, 0x78, 0xde, 0x5e, 0xc1, 0xd2, 0xf8, 0x45, 0x72, 0x69, 0x7c, 0xeb, 0xc5, 0x8b,
- 0x6c, 0xc8, 0x12, 0x79, 0x6d, 0x54, 0x6d, 0x45, 0xeb, 0xde, 0x18, 0x4f, 0xfe, 0xcf, 0x23, 0x53,
- 0x25, 0xb6, 0xd3, 0x8c, 0x5f, 0x2d, 0x09, 0xeb, 0x0f, 0x6c, 0xfe, 0xf4, 0xb4, 0xf8, 0xeb, 0x11,
- 0x14, 0x64, 0x03, 0x4c, 0x37, 0x83, 0xb7, 0x5a, 0x36, 0xf5, 0x95, 0xb1, 0x9e, 0xc8, 0x51, 0x4f,
- 0x7b, 0xb0, 0x16, 0x48, 0x18, 0x0a, 0xe9, 0xd5, 0xef, 0x14, 0xb0, 0x9e, 0xd5, 0xac, 0xf0, 0x60,
- 0xc0, 0xf2, 0xf5, 0x12, 0x8b, 0xf5, 0xf8, 0xcb, 0xd8, 0x8f, 0x0a, 0x38, 0x3a, 0x68, 0xc7, 0xe1,
- 0xe5, 0xcf, 0x17, 0x9b, 0x68, 0x2b, 0x89, 0xca, 0xff, 0x96, 0x90, 0x22, 0xa9, 0x85, 0xa7, 0xc0,
- 0x4c, 0xc3, 0xb0, 0xeb, 0xbb, 0xe4, 0xcb, 0x70, 0xdf, 0x8e, 0x0a, 0xf0, 0x23, 0x29, 0x47, 0x11,
- 0x02, 0x5e, 0x05, 0x4b, 0xc2, 0x6e, 0x13, 0xdb, 0x96, 0xd7, 0x10, 0xb9, 0x92, 0x4b, 0x43, 0xf4,
- 0x1e, 0xdc, 0x4a, 0xe9, 0x51, 0x9f, 0x85, 0xfa, 0x97, 0x02, 0xe0, 0x8b, 0xbc, 0xf3, 0x27, 0x41,
- 0xd1, 0x70, 0x89, 0x58, 0x3e, 0x83, 0x16, 0x28, 0xea, 0xf3, 0xdd, 0x4e, 0xa5, 0x78, 0x65, 0xe7,
- 0x7a, 0x20, 0x44, 0xb1, 0x9e, 0x83, 0xc3, 0x27, 0x30, 0x78, 0xea, 0x24, 0x38, 0x74, 0xcc, 0x50,
- 0xac, 0x87, 0x97, 0xc1, 0x9c, 0xd9, 0xf4, 0x99, 0x87, 0xe9, 0xae, 0xe9, 0xb8, 0x58, 0x8c, 0x8c,
- 0x19, 0xfd, 0xa8, 0x8c, 0x69, 0x6e, 0xa3, 0x47, 0x87, 0x12, 0x48, 0xa8, 0x01, 0xc0, 0x0b, 0x9e,
- 0xb9, 0x06, 0xf7, 0x53, 0x10, 0x7e, 0x16, 0xf8, 0x85, 0x6d, 0x47, 0x52, 0xd4, 0x83, 0x50, 0x1f,
- 0x80, 0x95, 0x5d, 0x4c, 0xdb, 0xc4, 0xc4, 0x57, 0x4c, 0xd3, 0xf1, 0x6d, 0x2f, 0x5c, 0xa3, 0xab,
- 0xa0, 0x18, 0xc1, 0x64, 0x4f, 0x1c, 0x91, 0xfe, 0x8b, 0x11, 0x17, 0x8a, 0x31, 0x51, 0x13, 0xe6,
- 0x86, 0x37, 0x61, 0x0e, 0x4c, 0xc7, 0xf4, 0xf9, 0x7d, 0x62, 0xd7, 0x25, 0xf3, 0xb1, 0x10, 0x7d,
- 0x83, 0xd8, 0xf5, 0xe7, 0x9d, 0xca, 0xac, 0x84, 0xf1, 0x4f, 0x24, 0x80, 0xf0, 0x3a, 0xc8, 0xfb,
- 0x0c, 0x53, 0xd9, 0x5e, 0x27, 0xb3, 0x8a, 0xf9, 0x0e, 0xc3, 0x34, 0xdc, 0x7c, 0x66, 0x38, 0x33,
- 0x17, 0x20, 0x41, 0x01, 0xb7, 0x40, 0xc1, 0xe2, 0x97, 0x22, 0xa7, 0xfe, 0xa9, 0x2c, 0xae, 0xde,
- 0x9f, 0x17, 0x41, 0x19, 0x08, 0x09, 0x0a, 0x58, 0xe0, 0x43, 0xb0, 0xc0, 0x12, 0x29, 0x14, 0xd7,
- 0x35, 0xc6, 0x26, 0x33, 0x30, 0xf1, 0x3a, 0xec, 0x76, 0x2a, 0x0b, 0x49, 0x15, 0x4a, 0x39, 0x50,
- 0xab, 0x60, 0xb6, 0x27, 0xc0, 0xec, 0xf9, 0xa7, 0x5f, 0x7d, 0xfc, 0xac, 0x3c, 0xf1, 0xe4, 0x59,
- 0x79, 0xe2, 0xe9, 0xb3, 0xf2, 0xc4, 0x57, 0xdd, 0xb2, 0xf2, 0xb8, 0x5b, 0x56, 0x9e, 0x74, 0xcb,
- 0xca, 0xd3, 0x6e, 0x59, 0xf9, 0xad, 0x5b, 0x56, 0xbe, 0xf9, 0xbd, 0x3c, 0x71, 0xb7, 0x3c, 0xfa,
- 0xff, 0x8c, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x98, 0x4a, 0x24, 0x86, 0xa1, 0x14, 0x00, 0x00,
+ // 1604 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcb, 0x73, 0xdb, 0x54,
+ 0x17, 0x8f, 0x1c, 0x3b, 0x89, 0x4f, 0x9e, 0xbd, 0x69, 0x26, 0xfe, 0xd2, 0x6f, 0xec, 0x54, 0xdf,
+ 0xcc, 0x57, 0xa0, 0xad, 0xdc, 0x27, 0x2d, 0x30, 0x3c, 0xaa, 0xb4, 0x94, 0xd2, 0x24, 0x4d, 0x6f,
+ 0x5a, 0xe8, 0x94, 0xce, 0x50, 0x59, 0xbe, 0xb1, 0xd5, 0x58, 0x8f, 0xea, 0x4a, 0x0e, 0xa1, 0x1b,
+ 0x86, 0xbf, 0x80, 0x35, 0x2c, 0x59, 0xb0, 0x62, 0xc3, 0x96, 0x05, 0x4b, 0x3a, 0xac, 0xba, 0xec,
+ 0xca, 0x50, 0xb3, 0xe2, 0x3f, 0x80, 0xce, 0x30, 0xc3, 0xdc, 0xab, 0x2b, 0xc9, 0xf2, 0x4b, 0x9e,
+ 0x74, 0xa6, 0x2b, 0x76, 0xd1, 0x79, 0xfc, 0xce, 0xbd, 0xe7, 0x9e, 0xc7, 0xcf, 0x81, 0xab, 0xbb,
+ 0x17, 0xa9, 0x62, 0xd8, 0xe5, 0x5d, 0xbf, 0x42, 0x5c, 0x8b, 0x78, 0x84, 0x96, 0x9b, 0xc4, 0xaa,
+ 0xda, 0x6e, 0x59, 0x28, 0x34, 0xc7, 0x28, 0xef, 0x34, 0xec, 0x3d, 0xdd, 0xb6, 0x3c, 0xd7, 0x6e,
+ 0x94, 0x9b, 0xa7, 0x2b, 0xc4, 0xd3, 0xce, 0x96, 0x6b, 0xc4, 0x22, 0xae, 0xe6, 0x91, 0xaa, 0xe2,
+ 0xb8, 0xb6, 0x67, 0xa3, 0x62, 0x60, 0xaf, 0x68, 0x8e, 0xa1, 0x74, 0xd8, 0x2b, 0xc2, 0x7e, 0xe5,
+ 0x64, 0xcd, 0xf0, 0xea, 0x7e, 0x45, 0xd1, 0x6d, 0xb3, 0x5c, 0xb3, 0x6b, 0x76, 0x99, 0xbb, 0x55,
+ 0xfc, 0x1d, 0xfe, 0xc5, 0x3f, 0xf8, 0x5f, 0x01, 0xdc, 0xca, 0xb9, 0x38, 0xbc, 0xa9, 0xe9, 0x75,
+ 0xc3, 0x22, 0xee, 0x7e, 0xd9, 0xd9, 0xad, 0x31, 0x01, 0x2d, 0x9b, 0xc4, 0xd3, 0xca, 0xcd, 0xd3,
+ 0xdd, 0x87, 0x58, 0x29, 0x0f, 0xf2, 0x72, 0x7d, 0xcb, 0x33, 0x4c, 0xd2, 0xe3, 0xf0, 0x7a, 0x9a,
+ 0x03, 0xd5, 0xeb, 0xc4, 0xd4, 0xba, 0xfd, 0xe4, 0x1f, 0x25, 0x58, 0xbd, 0xf2, 0x19, 0x31, 0x1d,
+ 0x6f, 0xcb, 0x35, 0x6c, 0xd7, 0xf0, 0xf6, 0xd7, 0x49, 0x93, 0x34, 0xd6, 0x6c, 0x6b, 0xc7, 0xa8,
+ 0xf9, 0xae, 0xe6, 0x19, 0xb6, 0x85, 0xee, 0x40, 0xc1, 0xb2, 0x4d, 0xc3, 0xd2, 0x98, 0x5c, 0xf7,
+ 0x5d, 0x97, 0x58, 0xfa, 0xfe, 0x76, 0x5d, 0x73, 0x09, 0x2d, 0x48, 0xab, 0xd2, 0x2b, 0x39, 0xf5,
+ 0xbf, 0xed, 0x56, 0xa9, 0xb0, 0x39, 0xc0, 0x06, 0x0f, 0xf4, 0x46, 0x6f, 0xc3, 0x7c, 0x83, 0x58,
+ 0x55, 0xad, 0xd2, 0x20, 0x5b, 0xc4, 0xd5, 0x89, 0xe5, 0x15, 0x32, 0x1c, 0x70, 0xb1, 0xdd, 0x2a,
+ 0xcd, 0xaf, 0x27, 0x55, 0xb8, 0xdb, 0x56, 0xbe, 0x0b, 0xcb, 0xef, 0x37, 0xec, 0xbd, 0xcb, 0x06,
+ 0xf5, 0x0c, 0xab, 0xe6, 0x1b, 0xb4, 0x4e, 0xdc, 0x0d, 0xe2, 0xd5, 0xed, 0x2a, 0x7a, 0x17, 0xb2,
+ 0xde, 0xbe, 0x43, 0xf8, 0xf9, 0xf2, 0xea, 0xf1, 0xc7, 0xad, 0xd2, 0x58, 0xbb, 0x55, 0xca, 0xde,
+ 0xda, 0x77, 0xc8, 0xf3, 0x56, 0xe9, 0xc8, 0x00, 0x37, 0xa6, 0xc6, 0xdc, 0x51, 0xfe, 0x3a, 0x03,
+ 0xc0, 0xac, 0xb6, 0x79, 0xe2, 0xd0, 0x7d, 0x98, 0x62, 0x8f, 0x55, 0xd5, 0x3c, 0x8d, 0x63, 0x4e,
+ 0x9f, 0x39, 0xa5, 0xc4, 0x95, 0x12, 0xe5, 0x5c, 0x71, 0x76, 0x6b, 0x4c, 0x40, 0x15, 0x66, 0xad,
+ 0x34, 0x4f, 0x2b, 0x37, 0x2a, 0x0f, 0x88, 0xee, 0x6d, 0x10, 0x4f, 0x53, 0x91, 0x38, 0x05, 0xc4,
+ 0x32, 0x1c, 0xa1, 0xa2, 0x2d, 0xc8, 0x52, 0x87, 0xe8, 0x3c, 0x01, 0xd3, 0x67, 0x14, 0x65, 0x78,
+ 0x1d, 0x2a, 0xf1, 0xd9, 0xb6, 0x1d, 0xa2, 0xab, 0x33, 0xe1, 0x0d, 0xd9, 0x17, 0xe6, 0x48, 0xe8,
+ 0x0e, 0x4c, 0x50, 0x4f, 0xf3, 0x7c, 0x5a, 0x18, 0xef, 0x39, 0x71, 0x1a, 0x26, 0xf7, 0x53, 0xe7,
+ 0x04, 0xea, 0x44, 0xf0, 0x8d, 0x05, 0x9e, 0xfc, 0x34, 0x03, 0x8b, 0xb1, 0xf1, 0x9a, 0x6d, 0x55,
+ 0x0d, 0x5e, 0x29, 0x6f, 0x25, 0xb2, 0x7e, 0xac, 0x2b, 0xeb, 0xcb, 0x7d, 0x5c, 0xe2, 0x8c, 0xa3,
+ 0x37, 0xa2, 0xe3, 0x66, 0xb8, 0xfb, 0xd1, 0x64, 0xf0, 0xe7, 0xad, 0xd2, 0x7c, 0xe4, 0x96, 0x3c,
+ 0x0f, 0x6a, 0x02, 0x6a, 0x68, 0xd4, 0xbb, 0xe5, 0x6a, 0x16, 0x0d, 0x60, 0x0d, 0x93, 0x88, 0x5b,
+ 0xbf, 0x36, 0xda, 0x3b, 0x31, 0x0f, 0x75, 0x45, 0x84, 0x44, 0xeb, 0x3d, 0x68, 0xb8, 0x4f, 0x04,
+ 0xf4, 0x7f, 0x98, 0x70, 0x89, 0x46, 0x6d, 0xab, 0x90, 0xe5, 0x47, 0x8e, 0xf2, 0x85, 0xb9, 0x14,
+ 0x0b, 0x2d, 0x7a, 0x15, 0x26, 0x4d, 0x42, 0xa9, 0x56, 0x23, 0x85, 0x1c, 0x37, 0x9c, 0x17, 0x86,
+ 0x93, 0x1b, 0x81, 0x18, 0x87, 0x7a, 0xf9, 0x27, 0x09, 0xe6, 0xe2, 0x3c, 0xad, 0x1b, 0xd4, 0x43,
+ 0xf7, 0x7a, 0x6a, 0x4f, 0x19, 0xed, 0x4e, 0xcc, 0x9b, 0x57, 0xde, 0x82, 0x08, 0x37, 0x15, 0x4a,
+ 0x3a, 0xea, 0xee, 0x06, 0xe4, 0x0c, 0x8f, 0x98, 0x2c, 0xeb, 0xe3, 0x5d, 0xe9, 0x4a, 0x29, 0x12,
+ 0x75, 0x56, 0xc0, 0xe6, 0xae, 0x31, 0x00, 0x1c, 0xe0, 0xc8, 0x7f, 0x8c, 0x77, 0xde, 0x80, 0xd5,
+ 0x23, 0xfa, 0x4e, 0x82, 0x15, 0x67, 0xe0, 0x80, 0x11, 0x97, 0x5a, 0x4b, 0x8b, 0x3c, 0x78, 0x44,
+ 0x61, 0xb2, 0x43, 0xd8, 0x5c, 0x21, 0xaa, 0x2c, 0x8e, 0xb4, 0x32, 0xc4, 0x78, 0xc8, 0x51, 0xd0,
+ 0x87, 0x80, 0x4c, 0xcd, 0x63, 0x19, 0xad, 0x6d, 0xb9, 0x44, 0x27, 0x55, 0x86, 0x2a, 0x86, 0x52,
+ 0x54, 0x1d, 0x1b, 0x3d, 0x16, 0xb8, 0x8f, 0x17, 0xfa, 0x52, 0x82, 0xc5, 0x6a, 0xef, 0x90, 0x11,
+ 0x75, 0x79, 0x61, 0x94, 0x44, 0xf7, 0x99, 0x51, 0xea, 0x72, 0xbb, 0x55, 0x5a, 0xec, 0xa3, 0xc0,
+ 0xfd, 0x82, 0xa1, 0x7b, 0x90, 0x73, 0xfd, 0x06, 0xa1, 0x85, 0x2c, 0x7f, 0xde, 0xd4, 0xa8, 0x5b,
+ 0x76, 0xc3, 0xd0, 0xf7, 0x31, 0x73, 0xf9, 0xd8, 0xf0, 0xea, 0xdb, 0x3e, 0x9f, 0x55, 0x34, 0x7e,
+ 0x6b, 0xae, 0xc2, 0x01, 0xa8, 0xfc, 0x08, 0x16, 0xba, 0x87, 0x06, 0xaa, 0x01, 0xe8, 0x61, 0x9f,
+ 0xb2, 0x05, 0xc1, 0xc2, 0x9e, 0x1d, 0xbd, 0xaa, 0xa2, 0x1e, 0x8f, 0xe7, 0x65, 0x24, 0xa2, 0xb8,
+ 0x03, 0x5a, 0x3e, 0x05, 0x33, 0x57, 0x5d, 0xdb, 0x77, 0xc4, 0x19, 0xd1, 0x2a, 0x64, 0x2d, 0xcd,
+ 0x0c, 0xa7, 0x4f, 0x34, 0x11, 0x37, 0x35, 0x93, 0x60, 0xae, 0x91, 0xbf, 0x95, 0x60, 0x76, 0xdd,
+ 0x30, 0x0d, 0x0f, 0x13, 0xea, 0xd8, 0x16, 0x25, 0xe8, 0x7c, 0x62, 0x62, 0x1d, 0xed, 0x9a, 0x58,
+ 0x87, 0x12, 0xc6, 0x1d, 0xb3, 0xea, 0x13, 0x98, 0x7c, 0xe8, 0x13, 0xdf, 0xb0, 0x6a, 0x62, 0x5e,
+ 0x9f, 0x4b, 0xbb, 0xe0, 0xcd, 0xc0, 0x3c, 0x51, 0x6d, 0xea, 0x34, 0x1b, 0x01, 0x42, 0x83, 0x43,
+ 0x44, 0xf9, 0xef, 0x0c, 0x1c, 0xe5, 0x81, 0x49, 0x75, 0xc8, 0x56, 0xbe, 0x97, 0xba, 0x95, 0x57,
+ 0xc5, 0x6d, 0x0e, 0xb2, 0x99, 0x1f, 0xc0, 0x6c, 0xa3, 0xf3, 0xee, 0xe2, 0x9a, 0x27, 0xd3, 0xae,
+ 0x99, 0x48, 0x98, 0xba, 0x24, 0x4e, 0x90, 0x4c, 0x3a, 0x4e, 0x42, 0xf7, 0x63, 0x01, 0xe3, 0xa3,
+ 0xb3, 0x00, 0x74, 0x03, 0x96, 0x2a, 0xb6, 0xeb, 0xda, 0x7b, 0x86, 0x55, 0xe3, 0x71, 0x42, 0x90,
+ 0x2c, 0x07, 0xf9, 0x4f, 0xbb, 0x55, 0x5a, 0x52, 0xfb, 0x19, 0xe0, 0xfe, 0x7e, 0xf2, 0x1e, 0x2c,
+ 0x6d, 0xb2, 0x99, 0x42, 0x6d, 0xdf, 0xd5, 0x49, 0xdc, 0x10, 0xa8, 0x04, 0xb9, 0x26, 0x71, 0x2b,
+ 0x41, 0x51, 0xe7, 0xd5, 0x3c, 0x6b, 0x87, 0x8f, 0x98, 0x00, 0x07, 0x72, 0x76, 0x13, 0x2b, 0xf6,
+ 0xbc, 0x8d, 0xd7, 0x69, 0x61, 0x82, 0x9b, 0xf2, 0x9b, 0x6c, 0x26, 0x55, 0xb8, 0xdb, 0x56, 0x6e,
+ 0x65, 0x60, 0x79, 0x40, 0xff, 0xa1, 0xdb, 0x30, 0x45, 0xc5, 0xdf, 0xa2, 0xa7, 0x8e, 0xa5, 0xbd,
+ 0x85, 0xf0, 0x8d, 0xa7, 0x7f, 0x08, 0x86, 0x23, 0x28, 0x64, 0xc3, 0xac, 0x2b, 0x8e, 0xc0, 0x63,
+ 0x8a, 0x2d, 0x70, 0x26, 0x0d, 0xbb, 0x37, 0x3b, 0xf1, 0x63, 0xe3, 0x4e, 0x40, 0x9c, 0xc4, 0x47,
+ 0x8f, 0x60, 0xa1, 0xe3, 0xda, 0x41, 0xcc, 0x71, 0x1e, 0xf3, 0x7c, 0x5a, 0xcc, 0xbe, 0x8f, 0xa2,
+ 0x16, 0x44, 0xd8, 0x85, 0xcd, 0x2e, 0x58, 0xdc, 0x13, 0x48, 0xfe, 0x25, 0x03, 0x43, 0x16, 0xc3,
+ 0x4b, 0x20, 0x79, 0xf7, 0x13, 0x24, 0xef, 0x9d, 0x83, 0x6f, 0xbc, 0x81, 0xa4, 0xaf, 0xde, 0x45,
+ 0xfa, 0xde, 0x7b, 0x81, 0x18, 0xc3, 0x49, 0xe0, 0x9f, 0x19, 0xf8, 0xdf, 0x60, 0xe7, 0x98, 0x14,
+ 0x5e, 0x4f, 0x8c, 0xd8, 0x0b, 0x5d, 0x23, 0xf6, 0xd8, 0x08, 0x10, 0xff, 0x92, 0xc4, 0x2e, 0x92,
+ 0xf8, 0xab, 0x04, 0xc5, 0xc1, 0x79, 0x7b, 0x09, 0xa4, 0xf1, 0xd3, 0x24, 0x69, 0x7c, 0xf3, 0xe0,
+ 0x45, 0x36, 0x80, 0x44, 0x5e, 0x1d, 0x56, 0x5b, 0x11, 0xdd, 0x1b, 0x61, 0xe5, 0x7f, 0x9f, 0x19,
+ 0x96, 0x2a, 0xce, 0x4e, 0x53, 0x7e, 0xb5, 0x24, 0xbc, 0xaf, 0x58, 0x6c, 0xf5, 0x98, 0x6c, 0x7b,
+ 0x04, 0x05, 0x59, 0x87, 0xc9, 0x46, 0xb0, 0xab, 0x45, 0x53, 0x5f, 0x1a, 0x69, 0x45, 0x0e, 0x5b,
+ 0xed, 0x01, 0x2d, 0x10, 0x66, 0x38, 0x84, 0x47, 0x55, 0x98, 0x20, 0xfc, 0xa7, 0xfa, 0xa8, 0x9d,
+ 0x9d, 0xf6, 0xc3, 0x5e, 0x05, 0x56, 0x85, 0x81, 0x15, 0x16, 0xd8, 0xf2, 0x37, 0x12, 0xac, 0xa6,
+ 0x8d, 0x04, 0xb4, 0xd7, 0x87, 0xe2, 0xbd, 0x00, 0x7d, 0x1f, 0x9d, 0xf2, 0xfd, 0x20, 0xc1, 0xe1,
+ 0x7e, 0x4c, 0x8a, 0x35, 0x19, 0xa3, 0x4f, 0x11, 0xf7, 0x89, 0x9a, 0xec, 0x26, 0x97, 0x62, 0xa1,
+ 0x45, 0x27, 0x60, 0xaa, 0xae, 0x59, 0xd5, 0x6d, 0xe3, 0xf3, 0x90, 0xd5, 0x47, 0x65, 0xfe, 0x81,
+ 0x90, 0xe3, 0xc8, 0x02, 0x5d, 0x86, 0x05, 0xee, 0xb7, 0x4e, 0xac, 0x9a, 0x57, 0xe7, 0x2f, 0x22,
+ 0xa8, 0x49, 0xb4, 0x75, 0x6e, 0x76, 0xe9, 0x71, 0x8f, 0x87, 0xfc, 0x97, 0x04, 0xe8, 0x20, 0x6c,
+ 0xe2, 0x38, 0xe4, 0x35, 0xc7, 0xe0, 0x14, 0x37, 0x68, 0xb4, 0xbc, 0x3a, 0xdb, 0x6e, 0x95, 0xf2,
+ 0x97, 0xb6, 0xae, 0x05, 0x42, 0x1c, 0xeb, 0x99, 0x71, 0xb8, 0x68, 0x83, 0x85, 0x2a, 0x8c, 0xc3,
+ 0xc0, 0x14, 0xc7, 0x7a, 0x74, 0x11, 0x66, 0xf4, 0x86, 0x4f, 0x3d, 0xe2, 0x6e, 0xeb, 0xb6, 0x43,
+ 0xf8, 0x60, 0x9a, 0x52, 0x0f, 0x8b, 0x3b, 0xcd, 0xac, 0x75, 0xe8, 0x70, 0xc2, 0x12, 0x29, 0x00,
+ 0xac, 0xad, 0xa8, 0xa3, 0xb1, 0x38, 0x39, 0x1e, 0x67, 0x8e, 0x3d, 0xd8, 0x66, 0x24, 0xc5, 0x1d,
+ 0x16, 0xf2, 0x03, 0x58, 0xda, 0x26, 0x6e, 0xd3, 0xd0, 0xc9, 0x25, 0x5d, 0xb7, 0x7d, 0xcb, 0x0b,
+ 0xc9, 0x7a, 0x19, 0xf2, 0x91, 0x99, 0xe8, 0xbc, 0x43, 0x22, 0x7e, 0x3e, 0xc2, 0xc2, 0xb1, 0x4d,
+ 0xd4, 0xea, 0x99, 0x81, 0xad, 0xfe, 0x73, 0x06, 0x26, 0x63, 0xf8, 0xec, 0xae, 0x61, 0x55, 0x05,
+ 0xf2, 0x91, 0xd0, 0xfa, 0xba, 0x61, 0x55, 0x9f, 0xb7, 0x4a, 0xd3, 0xc2, 0x8c, 0x7d, 0x62, 0x6e,
+ 0x88, 0xae, 0x41, 0xd6, 0xa7, 0xc4, 0x15, 0x4d, 0x7c, 0x3c, 0xad, 0x98, 0x6f, 0x53, 0xe2, 0x86,
+ 0xfc, 0x6a, 0x8a, 0x21, 0x33, 0x01, 0xe6, 0x10, 0x68, 0x03, 0x72, 0x35, 0xf6, 0x28, 0xa2, 0x4f,
+ 0x4f, 0xa4, 0x61, 0x75, 0xfe, 0x88, 0x09, 0xca, 0x80, 0x4b, 0x70, 0x80, 0x82, 0x1e, 0xc2, 0x1c,
+ 0x4d, 0xa4, 0x90, 0x3f, 0xd7, 0x08, 0x7c, 0xa9, 0x6f, 0xe2, 0x55, 0xd4, 0x6e, 0x95, 0xe6, 0x92,
+ 0x2a, 0xdc, 0x15, 0x40, 0x2e, 0xc3, 0x74, 0xc7, 0x05, 0xd3, 0xa7, 0xac, 0x7a, 0xf9, 0xf1, 0xb3,
+ 0xe2, 0xd8, 0x93, 0x67, 0xc5, 0xb1, 0xa7, 0xcf, 0x8a, 0x63, 0x5f, 0xb4, 0x8b, 0xd2, 0xe3, 0x76,
+ 0x51, 0x7a, 0xd2, 0x2e, 0x4a, 0x4f, 0xdb, 0x45, 0xe9, 0xb7, 0x76, 0x51, 0xfa, 0xea, 0xf7, 0xe2,
+ 0xd8, 0xdd, 0xe2, 0xf0, 0xff, 0xc5, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xc5, 0x22, 0x46,
+ 0xc5, 0x15, 0x00, 0x00,
+}
+
+func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExemptPriorityLevelConfiguration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExemptPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.LendablePercent != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.LendablePercent))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.NominalConcurrencyShares != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
}
func (m *FlowDistinguisherMethod) Marshal() (dAtA []byte, err error) {
@@ -1490,6 +1556,18 @@ func (m *PriorityLevelConfigurationSpec) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if m.Exempt != nil {
+ {
+ size, err := m.Exempt.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
if m.Limited != nil {
{
size, err := m.Limited.MarshalToSizedBuffer(dAtA[:i])
@@ -1782,6 +1860,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
+func (m *ExemptPriorityLevelConfiguration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NominalConcurrencyShares != nil {
+ n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares))
+ }
+ if m.LendablePercent != nil {
+ n += 1 + sovGenerated(uint64(*m.LendablePercent))
+ }
+ return n
+}
+
func (m *FlowDistinguisherMethod) Size() (n int) {
if m == nil {
return 0
@@ -2047,6 +2140,10 @@ func (m *PriorityLevelConfigurationSpec) Size() (n int) {
l = m.Limited.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.Exempt != nil {
+ l = m.Exempt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -2164,6 +2261,17 @@ func sovGenerated(x uint64) (n int) {
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (this *ExemptPriorityLevelConfiguration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExemptPriorityLevelConfiguration{`,
+ `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`,
+ `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *FlowDistinguisherMethod) String() string {
if this == nil {
return "nil"
@@ -2380,6 +2488,7 @@ func (this *PriorityLevelConfigurationSpec) String() string {
s := strings.Join([]string{`&PriorityLevelConfigurationSpec{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Limited:` + strings.Replace(this.Limited.String(), "LimitedPriorityLevelConfiguration", "LimitedPriorityLevelConfiguration", 1) + `,`,
+ `Exempt:` + strings.Replace(this.Exempt.String(), "ExemptPriorityLevelConfiguration", "ExemptPriorityLevelConfiguration", 1) + `,`,
`}`,
}, "")
return s
@@ -2467,6 +2576,96 @@ func valueToStringGenerated(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
+func (m *ExemptPriorityLevelConfiguration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExemptPriorityLevelConfiguration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NominalConcurrencyShares = &v
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LendablePercent", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LendablePercent = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *FlowDistinguisherMethod) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -4546,6 +4745,42 @@ func (m *PriorityLevelConfigurationSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Exempt == nil {
+ m.Exempt = &ExemptPriorityLevelConfiguration{}
+ }
+ if err := m.Exempt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
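For readers tracing the generated marshaling above: the literal tag bytes it writes (0x1a for the new `exempt` message field, 0x10 and 0x8 for the two varint fields of ExemptPriorityLevelConfiguration) follow from the protobuf key encoding (fieldNumber<<3)|wireType. A minimal sketch, separate from the vendored patch, with illustrative names only:

package main

import "fmt"

// protoKey returns the single-byte protobuf field key for small field numbers:
// key = (fieldNumber << 3) | wireType.
func protoKey(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("%#x\n", protoKey(1, 0)) // 0x8  -> nominalConcurrencyShares (varint)
	fmt.Printf("%#x\n", protoKey(2, 0)) // 0x10 -> lendablePercent (varint)
	fmt.Printf("%#x\n", protoKey(3, 2)) // 0x1a -> exempt (length-delimited message)
}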
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto
index adf9e8682c..eda0f7829e 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto
+++ b/vendor/k8s.io/api/flowcontrol/v1beta3/generated.proto
@@ -28,6 +28,40 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/flowcontrol/v1beta3";
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
+message ExemptPriorityLevelConfiguration {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
+ // +optional
+ optional int32 nominalConcurrencyShares = 1;
+
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ //
+ // +optional
+ optional int32 lendablePercent = 2;
+}
+
// FlowDistinguisherMethod specifies the method of a flow distinguisher.
message FlowDistinguisherMethod {
// `type` is the type of flow distinguisher method
@@ -168,10 +202,10 @@ message LimitedPriorityLevelConfiguration {
// Limited priority levels in proportion to their NCS values:
//
// NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
- // sum_ncs = sum[limited priority level k] NCS(k)
+ // sum_ncs = sum[priority level k] NCS(k)
//
// Bigger numbers mean a larger nominal concurrency limit,
- // at the expense of every other Limited priority level.
+ // at the expense of every other priority level.
// This field has a default value of 30.
// +optional
optional int32 nominalConcurrencyShares = 1;
@@ -334,6 +368,14 @@ message PriorityLevelConfigurationSpec {
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
optional LimitedPriorityLevelConfiguration limited = 2;
+
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ // +optional
+ optional ExemptPriorityLevelConfiguration exempt = 3;
}
// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
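The NominalCL and LendableCL formulas in the new comments above are easier to check with numbers. A minimal sketch of the same arithmetic, not part of the vendored file; the share values and server concurrency limit are made up for illustration:

package main

import (
	"fmt"
	"math"
)

// nominalCL computes NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ).
func nominalCL(serverCL int, ncs []int32, i int) int {
	var sum int32
	for _, v := range ncs {
		sum += v
	}
	return int(math.Ceil(float64(serverCL) * float64(ncs[i]) / float64(sum)))
}

// lendableCL computes LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 ).
func lendableCL(nominal int, lendablePercent int32) int {
	return int(math.Round(float64(nominal) * float64(lendablePercent) / 100.0))
}

func main() {
	ncs := []int32{30, 30, 5}   // hypothetical per-level nominalConcurrencyShares
	n := nominalCL(600, ncs, 2) // ceil(600*5/65) = 47
	fmt.Println(n, lendableCL(n, 50)) // prints: 47 24
}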
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go
index 2baf2dc39e..810941557b 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go
@@ -77,7 +77,9 @@ const (
// is a boolean false or has an invalid boolean representation
// (if the cluster operator sets it to 'false' it will be stomped)
// - any changes to the spec made by the cluster operator will be
- // stomped.
+ // stomped, except for changes to the `nominalConcurrencyShares`
+ // and `lendablePercent` fields of the PriorityLevelConfiguration
+ // named "exempt".
//
// The kube-apiserver will apply updates on the suggested configuration if:
// - the cluster operator has enabled auto-update by setting the annotation
@@ -433,6 +435,14 @@ type PriorityLevelConfigurationSpec struct {
// This field must be non-empty if and only if `type` is `"Limited"`.
// +optional
Limited *LimitedPriorityLevelConfiguration `json:"limited,omitempty" protobuf:"bytes,2,opt,name=limited"`
+
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ // +optional
+ Exempt *ExemptPriorityLevelConfiguration `json:"exempt,omitempty" protobuf:"bytes,3,opt,name=exempt"`
}
// PriorityLevelEnablement indicates whether limits on execution are enabled for the priority level
@@ -462,10 +472,10 @@ type LimitedPriorityLevelConfiguration struct {
// Limited priority levels in proportion to their NCS values:
//
// NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
- // sum_ncs = sum[limited priority level k] NCS(k)
+ // sum_ncs = sum[priority level k] NCS(k)
//
// Bigger numbers mean a larger nominal concurrency limit,
- // at the expense of every other Limited priority level.
+ // at the expense of every other priority level.
// This field has a default value of 30.
// +optional
NominalConcurrencyShares int32 `json:"nominalConcurrencyShares" protobuf:"varint,1,opt,name=nominalConcurrencyShares"`
@@ -503,6 +513,43 @@ type LimitedPriorityLevelConfiguration struct {
BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty" protobuf:"varint,4,opt,name=borrowingLimitPercent"`
}
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
+type ExemptPriorityLevelConfiguration struct {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
+ // +optional
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty" protobuf:"varint,1,opt,name=nominalConcurrencyShares"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ //
+ // +optional
+ LendablePercent *int32 `json:"lendablePercent,omitempty" protobuf:"varint,2,opt,name=lendablePercent"`
+ // The `BorrowingCL` of an Exempt priority level is implicitly `ServerCL`.
+ // In other words, an exempt priority level
+ // has no meaningful limit on how much it borrows.
+ // There is no explicit representation of that here.
+}
+
// LimitResponse defines how to handle requests that can not be executed right now.
// +union
type LimitResponse struct {
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go
index 728252c0cf..fa76112a72 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types_swagger_doc_generated.go
@@ -27,6 +27,16 @@ package v1beta3
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ExemptPriorityLevelConfiguration = map[string]string{
+ "": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.",
+ "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.",
+ "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )",
+}
+
+func (ExemptPriorityLevelConfiguration) SwaggerDoc() map[string]string {
+ return map_ExemptPriorityLevelConfiguration
+}
+
var map_FlowDistinguisherMethod = map[string]string{
"": "FlowDistinguisherMethod specifies the method of a flow distinguisher.",
"type": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.",
@@ -112,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string {
var map_LimitedPriorityLevelConfiguration = map[string]string{
"": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
- "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[limited priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other Limited priority level. This field has a default value of 30.",
+ "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30.",
"limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now",
"lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )",
"borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.",
@@ -190,6 +200,7 @@ var map_PriorityLevelConfigurationSpec = map[string]string{
"": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.",
"type": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.",
"limited": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.",
+ "exempt": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.",
}
func (PriorityLevelConfigurationSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go
index ec02d2a9c4..09fefa20aa 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.deepcopy.go
@@ -25,6 +25,32 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExemptPriorityLevelConfiguration) DeepCopyInto(out *ExemptPriorityLevelConfiguration) {
+ *out = *in
+ if in.NominalConcurrencyShares != nil {
+ in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares
+ *out = new(int32)
+ **out = **in
+ }
+ if in.LendablePercent != nil {
+ in, out := &in.LendablePercent, &out.LendablePercent
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExemptPriorityLevelConfiguration.
+func (in *ExemptPriorityLevelConfiguration) DeepCopy() *ExemptPriorityLevelConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(ExemptPriorityLevelConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlowDistinguisherMethod) DeepCopyInto(out *FlowDistinguisherMethod) {
*out = *in
@@ -400,6 +426,11 @@ func (in *PriorityLevelConfigurationSpec) DeepCopyInto(out *PriorityLevelConfigu
*out = new(LimitedPriorityLevelConfiguration)
(*in).DeepCopyInto(*out)
}
+ if in.Exempt != nil {
+ in, out := &in.Exempt, &out.Exempt
+ *out = new(ExemptPriorityLevelConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
return
}
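Taken together, the hunks above add an `Exempt` member to PriorityLevelConfigurationSpec along with its deepcopy support. A minimal sketch of how a caller might populate it, assuming the usual vendored import path; the numeric values are illustrative only:

package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)

func main() {
	ncs := int32(5)       // illustrative nominalConcurrencyShares
	lendable := int32(50) // illustrative lendablePercent

	spec := flowcontrolv1beta3.PriorityLevelConfigurationSpec{
		Type: flowcontrolv1beta3.PriorityLevelEnablementExempt,
		Exempt: &flowcontrolv1beta3.ExemptPriorityLevelConfiguration{
			NominalConcurrencyShares: &ncs,
			LendablePercent:          &lendable,
		},
	}

	// DeepCopy now covers the new pointer fields, per the deepcopy hunk above.
	copied := spec.DeepCopy()
	fmt.Println(*copied.Exempt.LendablePercent) // 50
}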
diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go
index e9566d57e2..daeaea5dce 100644
--- a/vendor/k8s.io/api/networking/v1/generated.pb.go
+++ b/vendor/k8s.io/api/networking/v1/generated.pb.go
@@ -776,38 +776,10 @@ func (m *NetworkPolicySpec) XXX_DiscardUnknown() {
var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo
-func (m *NetworkPolicyStatus) Reset() { *m = NetworkPolicyStatus{} }
-func (*NetworkPolicyStatus) ProtoMessage() {}
-func (*NetworkPolicyStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1c72867a70a7cc90, []int{26}
-}
-func (m *NetworkPolicyStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyStatus.Merge(m, src)
-}
-func (m *NetworkPolicyStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NetworkPolicyStatus proto.InternalMessageInfo
-
func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} }
func (*ServiceBackendPort) ProtoMessage() {}
func (*ServiceBackendPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_1c72867a70a7cc90, []int{27}
+ return fileDescriptor_1c72867a70a7cc90, []int{26}
}
func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -859,7 +831,6 @@ func init() {
proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer")
proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort")
proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec")
- proto.RegisterType((*NetworkPolicyStatus)(nil), "k8s.io.api.networking.v1.NetworkPolicyStatus")
proto.RegisterType((*ServiceBackendPort)(nil), "k8s.io.api.networking.v1.ServiceBackendPort")
}
@@ -868,115 +839,112 @@ func init() {
}
var fileDescriptor_1c72867a70a7cc90 = []byte{
- // 1715 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x47,
- 0x12, 0xd6, 0x50, 0xa2, 0x48, 0x35, 0x25, 0x59, 0x6a, 0xdb, 0x58, 0xae, 0x16, 0x4b, 0x6a, 0x07,
- 0x6b, 0x5b, 0xbb, 0xb6, 0xc9, 0xb5, 0x6c, 0x2c, 0x76, 0x2f, 0x49, 0x3c, 0xb2, 0x2c, 0x2b, 0x96,
- 0x29, 0xa2, 0xc9, 0x38, 0x48, 0x90, 0x87, 0x47, 0xc3, 0x16, 0x35, 0xe6, 0x70, 0x7a, 0xd0, 0xd3,
- 0x54, 0xac, 0x20, 0x08, 0x72, 0xc9, 0x21, 0xb7, 0xdc, 0x72, 0x0e, 0xf2, 0x0b, 0x82, 0xe4, 0x10,
- 0x20, 0x48, 0x8c, 0x5c, 0x02, 0x1f, 0x0d, 0xe4, 0xe2, 0x4b, 0x88, 0x98, 0xf9, 0x17, 0x3a, 0x05,
- 0xfd, 0x98, 0x17, 0x1f, 0x22, 0x63, 0x18, 0x3a, 0x49, 0x5d, 0x55, 0xfd, 0x75, 0xbd, 0xab, 0x86,
- 0xe0, 0x66, 0xeb, 0x7f, 0x7e, 0xc9, 0x26, 0xe5, 0x56, 0x67, 0x0f, 0x53, 0x17, 0x33, 0xec, 0x97,
- 0x0f, 0xb1, 0xdb, 0x20, 0xb4, 0xac, 0x18, 0xa6, 0x67, 0x97, 0x5d, 0xcc, 0x3e, 0x20, 0xb4, 0x65,
- 0xbb, 0xcd, 0xf2, 0xe1, 0xb5, 0x72, 0x13, 0xbb, 0x98, 0x9a, 0x0c, 0x37, 0x4a, 0x1e, 0x25, 0x8c,
- 0xc0, 0xbc, 0x94, 0x2c, 0x99, 0x9e, 0x5d, 0x8a, 0x24, 0x4b, 0x87, 0xd7, 0x56, 0xae, 0x36, 0x6d,
- 0x76, 0xd0, 0xd9, 0x2b, 0x59, 0xa4, 0x5d, 0x6e, 0x92, 0x26, 0x29, 0x8b, 0x0b, 0x7b, 0x9d, 0x7d,
- 0x71, 0x12, 0x07, 0xf1, 0x9f, 0x04, 0x5a, 0xd1, 0x63, 0x4f, 0x5a, 0x84, 0xe2, 0x21, 0x8f, 0xad,
- 0xdc, 0x88, 0x64, 0xda, 0xa6, 0x75, 0x60, 0xbb, 0x98, 0x1e, 0x95, 0xbd, 0x56, 0x93, 0x13, 0xfc,
- 0x72, 0x1b, 0x33, 0x73, 0xd8, 0xad, 0xf2, 0xa8, 0x5b, 0xb4, 0xe3, 0x32, 0xbb, 0x8d, 0x07, 0x2e,
- 0xfc, 0x77, 0xdc, 0x05, 0xdf, 0x3a, 0xc0, 0x6d, 0x73, 0xe0, 0xde, 0xf5, 0x51, 0xf7, 0x3a, 0xcc,
- 0x76, 0xca, 0xb6, 0xcb, 0x7c, 0x46, 0xfb, 0x2f, 0xe9, 0x3f, 0x6a, 0xe0, 0xcc, 0x9d, 0x7a, 0xbd,
- 0xba, 0xed, 0x36, 0x29, 0xf6, 0xfd, 0xaa, 0xc9, 0x0e, 0xe0, 0x2a, 0x98, 0xf1, 0x4c, 0x76, 0x90,
- 0xd7, 0x56, 0xb5, 0xb5, 0x39, 0x63, 0xfe, 0x49, 0xb7, 0x38, 0xd5, 0xeb, 0x16, 0x67, 0x38, 0x0f,
- 0x09, 0x0e, 0xbc, 0x01, 0xb2, 0xfc, 0x6f, 0xfd, 0xc8, 0xc3, 0xf9, 0x69, 0x21, 0x95, 0xef, 0x75,
- 0x8b, 0xd9, 0xaa, 0xa2, 0x1d, 0xc7, 0xfe, 0x47, 0xa1, 0x24, 0xac, 0x81, 0xcc, 0x9e, 0x69, 0xb5,
- 0xb0, 0xdb, 0xc8, 0xa7, 0x56, 0xb5, 0xb5, 0xdc, 0xfa, 0x5a, 0x69, 0x54, 0xf8, 0x4a, 0x4a, 0x1f,
- 0x43, 0xca, 0x1b, 0x67, 0x94, 0x12, 0x19, 0x45, 0x40, 0x01, 0x92, 0xbe, 0x0f, 0xce, 0xc5, 0xf4,
- 0x47, 0x1d, 0x07, 0xdf, 0x37, 0x9d, 0x0e, 0x86, 0x15, 0x90, 0xe6, 0x0f, 0xfb, 0x79, 0x6d, 0x75,
- 0x7a, 0x2d, 0xb7, 0xfe, 0xaf, 0xd1, 0x4f, 0xf5, 0x99, 0x6f, 0x2c, 0xa8, 0xb7, 0xd2, 0xfc, 0xe4,
- 0x23, 0x09, 0xa3, 0xef, 0x82, 0xcc, 0x76, 0xd5, 0x70, 0x88, 0xd5, 0xe2, 0xfe, 0xb1, 0xec, 0x06,
- 0xed, 0xf7, 0xcf, 0xc6, 0xf6, 0x2d, 0x84, 0x04, 0x07, 0xea, 0x60, 0x16, 0x3f, 0xb2, 0xb0, 0xc7,
- 0xf2, 0xa9, 0xd5, 0xe9, 0xb5, 0x39, 0x03, 0xf4, 0xba, 0xc5, 0xd9, 0x4d, 0x41, 0x41, 0x8a, 0xa3,
- 0x7f, 0x9a, 0x02, 0x19, 0xf5, 0x2c, 0x7c, 0x00, 0xb2, 0x3c, 0x7d, 0x1a, 0x26, 0x33, 0x05, 0x6a,
- 0x6e, 0xfd, 0x3f, 0x31, 0x7d, 0xc3, 0x68, 0x96, 0xbc, 0x56, 0x93, 0x13, 0xfc, 0x12, 0x97, 0xe6,
- 0xba, 0xef, 0xee, 0x3d, 0xc4, 0x16, 0xbb, 0x87, 0x99, 0x69, 0x40, 0xa5, 0x07, 0x88, 0x68, 0x28,
- 0x44, 0x85, 0x5b, 0x60, 0xc6, 0xf7, 0xb0, 0xa5, 0x1c, 0x7f, 0x61, 0xac, 0xe3, 0x6b, 0x1e, 0xb6,
- 0x22, 0xd3, 0xf8, 0x09, 0x09, 0x00, 0xb8, 0x0b, 0x66, 0x7d, 0x66, 0xb2, 0x8e, 0x2f, 0x02, 0x9f,
- 0x5b, 0xbf, 0x34, 0x1e, 0x4a, 0x88, 0x1b, 0x8b, 0x0a, 0x6c, 0x56, 0x9e, 0x91, 0x82, 0xd1, 0x7f,
- 0xd2, 0xc0, 0x62, 0x32, 0xda, 0xf0, 0x3e, 0xc8, 0xf8, 0x98, 0x1e, 0xda, 0x16, 0xce, 0xcf, 0x88,
- 0x47, 0xca, 0xe3, 0x1f, 0x91, 0xf2, 0x41, 0xbe, 0xe4, 0x78, 0xae, 0x28, 0x1a, 0x0a, 0xc0, 0xe0,
- 0x9b, 0x20, 0x4b, 0xb1, 0x4f, 0x3a, 0xd4, 0xc2, 0x4a, 0xfb, 0xab, 0x71, 0x60, 0x5e, 0xf7, 0x1c,
- 0x92, 0x27, 0x6b, 0x63, 0x87, 0x58, 0xa6, 0x23, 0x5d, 0x89, 0xf0, 0x3e, 0xa6, 0xd8, 0xb5, 0xb0,
- 0x31, 0xcf, 0xb3, 0x1c, 0x29, 0x08, 0x14, 0x82, 0xf1, 0x2a, 0x9a, 0x57, 0x8a, 0x6c, 0x38, 0xe6,
- 0xa9, 0x04, 0x74, 0x27, 0x11, 0xd0, 0x7f, 0x8f, 0x75, 0x90, 0xd0, 0x6b, 0x54, 0x54, 0xf5, 0x1f,
- 0x34, 0xb0, 0x14, 0x17, 0xdc, 0xb1, 0x7d, 0x06, 0xdf, 0x19, 0x30, 0xa2, 0x34, 0x99, 0x11, 0xfc,
- 0xb6, 0x30, 0x61, 0x49, 0x3d, 0x95, 0x0d, 0x28, 0x31, 0x03, 0xee, 0x82, 0xb4, 0xcd, 0x70, 0xdb,
- 0x17, 0x25, 0x92, 0x5b, 0xbf, 0x38, 0x99, 0x05, 0x51, 0x75, 0x6e, 0xf3, 0xcb, 0x48, 0x62, 0xe8,
- 0xbf, 0x6a, 0xa0, 0x18, 0x17, 0xab, 0x9a, 0xd4, 0x6c, 0x63, 0x86, 0xa9, 0x1f, 0x06, 0x0f, 0xae,
- 0x81, 0xac, 0x59, 0xdd, 0xde, 0xa2, 0xa4, 0xe3, 0x05, 0xa5, 0xcb, 0x55, 0xbb, 0xa9, 0x68, 0x28,
- 0xe4, 0xf2, 0x02, 0x6f, 0xd9, 0xaa, 0x4b, 0xc5, 0x0a, 0xfc, 0xae, 0xed, 0x36, 0x90, 0xe0, 0x70,
- 0x09, 0xd7, 0x6c, 0x07, 0xcd, 0x2f, 0x94, 0xa8, 0x98, 0x6d, 0x8c, 0x04, 0x07, 0x16, 0x41, 0xda,
- 0xb7, 0x88, 0x27, 0x33, 0x78, 0xce, 0x98, 0xe3, 0x2a, 0xd7, 0x38, 0x01, 0x49, 0x3a, 0xbc, 0x0c,
- 0xe6, 0xb8, 0xa0, 0xef, 0x99, 0x16, 0xce, 0xa7, 0x85, 0xd0, 0x42, 0xaf, 0x5b, 0x9c, 0xab, 0x04,
- 0x44, 0x14, 0xf1, 0xf5, 0xaf, 0xfb, 0xe2, 0xc3, 0x43, 0x07, 0xd7, 0x01, 0xb0, 0x88, 0xcb, 0x28,
- 0x71, 0x1c, 0x1c, 0x74, 0xa3, 0x30, 0x69, 0x36, 0x42, 0x0e, 0x8a, 0x49, 0x41, 0x1b, 0x00, 0x2f,
- 0xf4, 0x8d, 0x4a, 0x9e, 0xff, 0x4f, 0xe6, 0xfa, 0x21, 0x3e, 0x35, 0x16, 0xf9, 0x53, 0x31, 0x46,
- 0x0c, 0x5c, 0xff, 0x46, 0x03, 0x39, 0x75, 0xff, 0x14, 0xd2, 0xe9, 0x76, 0x32, 0x9d, 0xfe, 0x31,
- 0x7e, 0xb4, 0x0c, 0xcf, 0xa4, 0xef, 0x34, 0xb0, 0x12, 0x68, 0x4d, 0xcc, 0x86, 0x61, 0x3a, 0xa6,
- 0x6b, 0x61, 0x1a, 0x74, 0xea, 0x15, 0x90, 0xb2, 0x83, 0xf4, 0x01, 0x0a, 0x20, 0xb5, 0x5d, 0x45,
- 0x29, 0xdb, 0x83, 0x57, 0x40, 0xf6, 0x80, 0xf8, 0x4c, 0x24, 0x86, 0x4c, 0x9d, 0x50, 0xe1, 0x3b,
- 0x8a, 0x8e, 0x42, 0x09, 0x58, 0x05, 0x69, 0x8f, 0x50, 0xe6, 0xe7, 0x67, 0x84, 0xc2, 0x97, 0xc7,
- 0x2a, 0x5c, 0x25, 0x94, 0xa9, 0x5e, 0x1a, 0x8d, 0x28, 0x8e, 0x80, 0x24, 0x90, 0xfe, 0x11, 0xf8,
- 0xeb, 0x10, 0xcd, 0xe5, 0x15, 0xf8, 0x3e, 0xc8, 0xd8, 0x92, 0xa9, 0x26, 0xe2, 0x8d, 0xb1, 0x0f,
- 0x0e, 0xb1, 0x3f, 0x1a, 0xc4, 0xc1, 0xc0, 0x0d, 0x50, 0xf5, 0xaf, 0x34, 0xb0, 0x3c, 0xa0, 0xa9,
- 0xd8, 0x25, 0x08, 0x65, 0xc2, 0x63, 0xe9, 0xd8, 0x2e, 0x41, 0x28, 0x43, 0x82, 0x03, 0xef, 0x82,
- 0xac, 0x58, 0x45, 0x2c, 0xe2, 0x28, 0xaf, 0x95, 0x03, 0xaf, 0x55, 0x15, 0xfd, 0xb8, 0x5b, 0xfc,
- 0xdb, 0xe0, 0x7e, 0x56, 0x0a, 0xd8, 0x28, 0x04, 0xe0, 0x55, 0x87, 0x29, 0x25, 0x54, 0x15, 0xa6,
- 0xa8, 0xba, 0x4d, 0x4e, 0x40, 0x92, 0xae, 0x7f, 0x19, 0x25, 0x25, 0xdf, 0x15, 0xb8, 0x7e, 0x3c,
- 0x22, 0xfd, 0xb3, 0x9c, 0xc7, 0x0b, 0x09, 0x0e, 0xf4, 0xc0, 0x92, 0xdd, 0xb7, 0x5c, 0x4c, 0xdc,
- 0x74, 0xc3, 0x1b, 0x46, 0x5e, 0x21, 0x2f, 0xf5, 0x73, 0xd0, 0x00, 0xba, 0xfe, 0x00, 0x0c, 0x48,
- 0xf1, 0x76, 0x7f, 0xc0, 0x98, 0x37, 0xa4, 0x70, 0x46, 0x6f, 0x33, 0xd1, 0xeb, 0x59, 0x61, 0x53,
- 0xbd, 0x5e, 0x45, 0x02, 0x45, 0xff, 0x4c, 0x03, 0xe7, 0x87, 0x0e, 0xce, 0xb0, 0xb1, 0x69, 0x23,
- 0x1b, 0x5b, 0x45, 0x45, 0x54, 0xfa, 0xe0, 0xca, 0x68, 0x4d, 0x92, 0xc8, 0x3c, 0xe2, 0xc3, 0xe2,
- 0xaf, 0xff, 0x9c, 0x0a, 0x23, 0x22, 0xba, 0xda, 0x6b, 0xa1, 0xbf, 0x45, 0xd7, 0xe1, 0x2f, 0xab,
- 0x1e, 0x7a, 0x2e, 0xe6, 0xbf, 0x90, 0x87, 0x06, 0xa4, 0x61, 0x03, 0x2c, 0x36, 0xf0, 0xbe, 0xd9,
- 0x71, 0x98, 0x7a, 0x5b, 0x79, 0x6d, 0xf2, 0x75, 0x13, 0xf6, 0xba, 0xc5, 0xc5, 0x5b, 0x09, 0x0c,
- 0xd4, 0x87, 0x09, 0x37, 0xc0, 0x34, 0x73, 0x82, 0x76, 0xf3, 0xcf, 0xb1, 0xd0, 0xf5, 0x9d, 0x9a,
- 0x91, 0x53, 0xe6, 0x4f, 0xd7, 0x77, 0x6a, 0x88, 0xdf, 0x86, 0xaf, 0x83, 0x34, 0xed, 0x38, 0x98,
- 0x2f, 0x53, 0xd3, 0x13, 0xed, 0x65, 0x3c, 0xa6, 0x51, 0xf9, 0xf3, 0x93, 0x8f, 0x24, 0x84, 0xfe,
- 0x31, 0x58, 0x48, 0x6c, 0x5c, 0xb0, 0x0d, 0xe6, 0x9d, 0x58, 0x09, 0x2b, 0x2f, 0x5c, 0xff, 0x53,
- 0x75, 0xaf, 0x1a, 0xce, 0x39, 0xf5, 0xe2, 0x7c, 0x9c, 0x87, 0x12, 0xf0, 0xba, 0x09, 0x40, 0x64,
- 0x2b, 0xaf, 0x44, 0x5e, 0x3e, 0xb2, 0xdb, 0xa8, 0x4a, 0xe4, 0x55, 0xe5, 0x23, 0x49, 0xe7, 0xd3,
- 0xcb, 0xc7, 0x16, 0xc5, 0xac, 0x12, 0xf5, 0xcb, 0x70, 0x7a, 0xd5, 0x42, 0x0e, 0x8a, 0x49, 0xe9,
- 0x5f, 0xa4, 0xc0, 0x42, 0x45, 0xaa, 0x5c, 0x25, 0x8e, 0x6d, 0x1d, 0x9d, 0xc2, 0xa2, 0x75, 0x2f,
- 0xb1, 0x68, 0x9d, 0xd0, 0xa6, 0x13, 0x8a, 0x8d, 0xdc, 0x9f, 0xdf, 0xe8, 0xdb, 0x9f, 0xaf, 0x4e,
- 0x0a, 0x78, 0xf2, 0x16, 0xfd, 0xad, 0x06, 0xfe, 0x92, 0x90, 0xdf, 0x8c, 0x7a, 0x5c, 0x38, 0x69,
- 0xb4, 0x71, 0x93, 0x26, 0x81, 0x20, 0x2a, 0x76, 0xe8, 0xa4, 0x81, 0x5b, 0x20, 0xc5, 0x88, 0x4a,
- 0xfd, 0x89, 0xe1, 0x30, 0xa6, 0xd1, 0xc8, 0xac, 0x13, 0x94, 0x62, 0x44, 0xff, 0x5e, 0x03, 0xf9,
- 0x84, 0x54, 0xbc, 0x37, 0xbf, 0x7c, 0xbd, 0xef, 0x81, 0x99, 0x7d, 0x4a, 0xda, 0x2f, 0xa2, 0x79,
- 0x18, 0xcb, 0xdb, 0x94, 0xb4, 0x91, 0x80, 0xd1, 0x1f, 0x6b, 0x60, 0x39, 0x21, 0x79, 0x0a, 0x7b,
- 0xce, 0x4e, 0x72, 0xcf, 0xb9, 0x34, 0xa1, 0x0d, 0x23, 0xb6, 0x9d, 0xc7, 0xa9, 0x3e, 0x0b, 0xb8,
- 0xad, 0x70, 0x1f, 0xe4, 0x3c, 0xd2, 0xa8, 0x61, 0x07, 0x5b, 0x8c, 0x0c, 0xeb, 0x1b, 0x27, 0x19,
- 0x61, 0xee, 0x61, 0x27, 0xb8, 0x6a, 0x9c, 0xe9, 0x75, 0x8b, 0xb9, 0x6a, 0x84, 0x85, 0xe2, 0xc0,
- 0xf0, 0x11, 0x58, 0x0e, 0x57, 0xdc, 0xf0, 0xb5, 0xd4, 0x8b, 0xbf, 0x76, 0xbe, 0xd7, 0x2d, 0x2e,
- 0x57, 0xfa, 0x11, 0xd1, 0xe0, 0x23, 0xf0, 0x0e, 0xc8, 0xd8, 0x9e, 0xf8, 0x9a, 0x57, 0x65, 0x78,
- 0xd2, 0xbe, 0x28, 0x3f, 0xfb, 0xe5, 0x37, 0xa5, 0x3a, 0xa0, 0xe0, 0xba, 0xfe, 0x4b, 0x7f, 0x0e,
- 0xf0, 0x84, 0x83, 0x5b, 0xb1, 0xa5, 0x46, 0x8e, 0xd2, 0xcb, 0x2f, 0xb6, 0xd0, 0x24, 0xa7, 0xed,
- 0xe8, 0xde, 0xd6, 0x61, 0xb6, 0x53, 0x92, 0xbf, 0xf1, 0x94, 0xb6, 0x5d, 0xb6, 0x4b, 0x6b, 0x8c,
- 0xda, 0x6e, 0x53, 0x4e, 0xfe, 0xd8, 0xb6, 0x75, 0x01, 0x64, 0xd4, 0x30, 0x16, 0x86, 0xa7, 0xa5,
- 0x55, 0x9b, 0x92, 0x84, 0x02, 0x9e, 0x7e, 0xdc, 0x9f, 0x17, 0x62, 0x34, 0x3f, 0x7c, 0x69, 0x79,
- 0x71, 0x56, 0x65, 0xe3, 0xe8, 0xdc, 0x78, 0x37, 0xda, 0x57, 0x65, 0xa6, 0xaf, 0x4f, 0x98, 0xe9,
- 0xf1, 0x41, 0x39, 0x72, 0x5b, 0x85, 0x6f, 0x81, 0x59, 0x2c, 0xd1, 0xe5, 0xe4, 0xbd, 0x36, 0x21,
- 0x7a, 0xd4, 0x56, 0xa3, 0x56, 0xac, 0x68, 0x0a, 0x10, 0xbe, 0xca, 0xbd, 0xc4, 0x65, 0xeb, 0x47,
- 0x1e, 0x96, 0xeb, 0xfd, 0x9c, 0xf1, 0x77, 0x69, 0x6c, 0x48, 0x3e, 0xe6, 0xdf, 0x4d, 0xe1, 0x11,
- 0xc5, 0x6f, 0xe8, 0x1f, 0x82, 0xb3, 0x43, 0x5a, 0x3f, 0xb4, 0xc4, 0xe7, 0x5e, 0xc3, 0x66, 0x36,
- 0x71, 0x83, 0x9e, 0x58, 0x9e, 0xcc, 0xf9, 0x1b, 0xc1, 0xbd, 0xc4, 0xf7, 0xa1, 0x82, 0x42, 0x31,
- 0x58, 0xfd, 0x3d, 0x00, 0x07, 0xf7, 0xb6, 0x09, 0xb6, 0xc2, 0x8b, 0x60, 0xd6, 0xed, 0xb4, 0xf7,
- 0xb0, 0xac, 0xdf, 0x74, 0xe4, 0x9c, 0x8a, 0xa0, 0x22, 0xc5, 0x35, 0x5e, 0x79, 0xf2, 0xbc, 0x30,
- 0xf5, 0xf4, 0x79, 0x61, 0xea, 0xd9, 0xf3, 0xc2, 0xd4, 0x27, 0xbd, 0x82, 0xf6, 0xa4, 0x57, 0xd0,
- 0x9e, 0xf6, 0x0a, 0xda, 0xb3, 0x5e, 0x41, 0xfb, 0xad, 0x57, 0xd0, 0x3e, 0xff, 0xbd, 0x30, 0xf5,
- 0x76, 0x7e, 0xd4, 0x0f, 0xc0, 0x7f, 0x04, 0x00, 0x00, 0xff, 0xff, 0x61, 0x0f, 0x0a, 0xd7, 0x34,
- 0x16, 0x00, 0x00,
+ // 1671 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xd5,
+ 0x1a, 0xcf, 0x38, 0x71, 0xec, 0x1c, 0x27, 0x69, 0x72, 0x6e, 0xab, 0xeb, 0x9b, 0xab, 0x6b, 0xe7,
+ 0x8e, 0x68, 0x1b, 0x68, 0x6b, 0xd3, 0xb4, 0x42, 0xb0, 0x01, 0x3a, 0x69, 0x9a, 0x86, 0xa6, 0x8e,
+ 0x75, 0x6c, 0x15, 0x81, 0x78, 0x74, 0x32, 0x3e, 0xb1, 0xa7, 0x1e, 0xcf, 0x19, 0x9d, 0x39, 0x0e,
+ 0xad, 0x84, 0x10, 0x1b, 0x16, 0xec, 0xf8, 0x17, 0x10, 0x7f, 0x01, 0x82, 0x05, 0x12, 0x82, 0xc2,
+ 0x06, 0x75, 0x59, 0x89, 0x4d, 0x37, 0x58, 0xd4, 0xfc, 0x17, 0x59, 0xa1, 0xf3, 0x98, 0x97, 0x1f,
+ 0xb5, 0xa9, 0xaa, 0xac, 0x92, 0xf3, 0x7d, 0xdf, 0xf9, 0x7d, 0x8f, 0xf3, 0xbd, 0xc6, 0xe0, 0x5a,
+ 0xfb, 0x75, 0xbf, 0x64, 0x93, 0x72, 0xbb, 0x7b, 0x80, 0xa9, 0x8b, 0x19, 0xf6, 0xcb, 0x47, 0xd8,
+ 0x6d, 0x10, 0x5a, 0x56, 0x0c, 0xd3, 0xb3, 0xcb, 0x2e, 0x66, 0x9f, 0x10, 0xda, 0xb6, 0xdd, 0x66,
+ 0xf9, 0xe8, 0x72, 0xb9, 0x89, 0x5d, 0x4c, 0x4d, 0x86, 0x1b, 0x25, 0x8f, 0x12, 0x46, 0x60, 0x5e,
+ 0x4a, 0x96, 0x4c, 0xcf, 0x2e, 0x45, 0x92, 0xa5, 0xa3, 0xcb, 0x6b, 0x97, 0x9a, 0x36, 0x6b, 0x75,
+ 0x0f, 0x4a, 0x16, 0xe9, 0x94, 0x9b, 0xa4, 0x49, 0xca, 0xe2, 0xc2, 0x41, 0xf7, 0x50, 0x9c, 0xc4,
+ 0x41, 0xfc, 0x27, 0x81, 0xd6, 0xf4, 0x98, 0x4a, 0x8b, 0x50, 0x3c, 0x42, 0xd9, 0xda, 0xd5, 0x48,
+ 0xa6, 0x63, 0x5a, 0x2d, 0xdb, 0xc5, 0xf4, 0x41, 0xd9, 0x6b, 0x37, 0x39, 0xc1, 0x2f, 0x77, 0x30,
+ 0x33, 0x47, 0xdd, 0x2a, 0x8f, 0xbb, 0x45, 0xbb, 0x2e, 0xb3, 0x3b, 0x78, 0xe8, 0xc2, 0x6b, 0x93,
+ 0x2e, 0xf8, 0x56, 0x0b, 0x77, 0xcc, 0xa1, 0x7b, 0x57, 0xc6, 0xdd, 0xeb, 0x32, 0xdb, 0x29, 0xdb,
+ 0x2e, 0xf3, 0x19, 0x1d, 0xbc, 0xa4, 0xff, 0xac, 0x81, 0x53, 0x37, 0xeb, 0xf5, 0xea, 0xae, 0xdb,
+ 0xa4, 0xd8, 0xf7, 0xab, 0x26, 0x6b, 0xc1, 0x75, 0x30, 0xe7, 0x99, 0xac, 0x95, 0xd7, 0xd6, 0xb5,
+ 0x8d, 0x05, 0x63, 0xf1, 0x51, 0xaf, 0x38, 0xd3, 0xef, 0x15, 0xe7, 0x38, 0x0f, 0x09, 0x0e, 0xbc,
+ 0x0a, 0xb2, 0xfc, 0x6f, 0xfd, 0x81, 0x87, 0xf3, 0xb3, 0x42, 0x2a, 0xdf, 0xef, 0x15, 0xb3, 0x55,
+ 0x45, 0x3b, 0x8e, 0xfd, 0x8f, 0x42, 0x49, 0x58, 0x03, 0x99, 0x03, 0xd3, 0x6a, 0x63, 0xb7, 0x91,
+ 0x4f, 0xad, 0x6b, 0x1b, 0xb9, 0xcd, 0x8d, 0xd2, 0xb8, 0xe7, 0x2b, 0x29, 0x7b, 0x0c, 0x29, 0x6f,
+ 0x9c, 0x52, 0x46, 0x64, 0x14, 0x01, 0x05, 0x48, 0xfa, 0x21, 0x38, 0x1d, 0xb3, 0x1f, 0x75, 0x1d,
+ 0x7c, 0xc7, 0x74, 0xba, 0x18, 0x56, 0x40, 0x9a, 0x2b, 0xf6, 0xf3, 0xda, 0xfa, 0xec, 0x46, 0x6e,
+ 0xf3, 0xe5, 0xf1, 0xaa, 0x06, 0xdc, 0x37, 0x96, 0x94, 0xae, 0x34, 0x3f, 0xf9, 0x48, 0xc2, 0xe8,
+ 0xfb, 0x20, 0xb3, 0x5b, 0x35, 0x1c, 0x62, 0xb5, 0x79, 0x7c, 0x2c, 0xbb, 0x41, 0x07, 0xe3, 0xb3,
+ 0xb5, 0x7b, 0x1d, 0x21, 0xc1, 0x81, 0x3a, 0x98, 0xc7, 0xf7, 0x2d, 0xec, 0xb1, 0x7c, 0x6a, 0x7d,
+ 0x76, 0x63, 0xc1, 0x00, 0xfd, 0x5e, 0x71, 0x7e, 0x5b, 0x50, 0x90, 0xe2, 0xe8, 0x5f, 0xa4, 0x40,
+ 0x46, 0xa9, 0x85, 0x77, 0x41, 0x96, 0xa7, 0x4f, 0xc3, 0x64, 0xa6, 0x40, 0xcd, 0x6d, 0xbe, 0x1a,
+ 0xb3, 0x37, 0x7c, 0xcd, 0x92, 0xd7, 0x6e, 0x72, 0x82, 0x5f, 0xe2, 0xd2, 0xdc, 0xf6, 0xfd, 0x83,
+ 0x7b, 0xd8, 0x62, 0xb7, 0x31, 0x33, 0x0d, 0xa8, 0xec, 0x00, 0x11, 0x0d, 0x85, 0xa8, 0x70, 0x07,
+ 0xcc, 0xf9, 0x1e, 0xb6, 0x54, 0xe0, 0xcf, 0x4e, 0x0c, 0x7c, 0xcd, 0xc3, 0x56, 0xe4, 0x1a, 0x3f,
+ 0x21, 0x01, 0x00, 0xf7, 0xc1, 0xbc, 0xcf, 0x4c, 0xd6, 0xf5, 0xc5, 0xc3, 0xe7, 0x36, 0xcf, 0x4f,
+ 0x86, 0x12, 0xe2, 0xc6, 0xb2, 0x02, 0x9b, 0x97, 0x67, 0xa4, 0x60, 0xf4, 0x5f, 0x35, 0xb0, 0x9c,
+ 0x7c, 0x6d, 0x78, 0x07, 0x64, 0x7c, 0x4c, 0x8f, 0x6c, 0x0b, 0xe7, 0xe7, 0x84, 0x92, 0xf2, 0x64,
+ 0x25, 0x52, 0x3e, 0xc8, 0x97, 0x1c, 0xcf, 0x15, 0x45, 0x43, 0x01, 0x18, 0x7c, 0x17, 0x64, 0x29,
+ 0xf6, 0x49, 0x97, 0x5a, 0x58, 0x59, 0x7f, 0x29, 0x0e, 0xcc, 0xeb, 0x9e, 0x43, 0xf2, 0x64, 0x6d,
+ 0xec, 0x11, 0xcb, 0x74, 0x64, 0x28, 0x11, 0x3e, 0xc4, 0x14, 0xbb, 0x16, 0x36, 0x16, 0x79, 0x96,
+ 0x23, 0x05, 0x81, 0x42, 0x30, 0x5e, 0x45, 0x8b, 0xca, 0x90, 0x2d, 0xc7, 0x3c, 0x91, 0x07, 0xdd,
+ 0x4b, 0x3c, 0xe8, 0x2b, 0x13, 0x03, 0x24, 0xec, 0x1a, 0xf7, 0xaa, 0xfa, 0x4f, 0x1a, 0x58, 0x89,
+ 0x0b, 0xee, 0xd9, 0x3e, 0x83, 0x1f, 0x0c, 0x39, 0x51, 0x9a, 0xce, 0x09, 0x7e, 0x5b, 0xb8, 0xb0,
+ 0xa2, 0x54, 0x65, 0x03, 0x4a, 0xcc, 0x81, 0x5b, 0x20, 0x6d, 0x33, 0xdc, 0xf1, 0x45, 0x89, 0xe4,
+ 0x36, 0xcf, 0x4d, 0xe7, 0x41, 0x54, 0x9d, 0xbb, 0xfc, 0x32, 0x92, 0x18, 0xfa, 0x1f, 0x1a, 0x28,
+ 0xc6, 0xc5, 0xaa, 0x26, 0x35, 0x3b, 0x98, 0x61, 0xea, 0x87, 0x8f, 0x07, 0x37, 0x40, 0xd6, 0xac,
+ 0xee, 0xee, 0x50, 0xd2, 0xf5, 0x82, 0xd2, 0xe5, 0xa6, 0x5d, 0x53, 0x34, 0x14, 0x72, 0x79, 0x81,
+ 0xb7, 0x6d, 0xd5, 0xa5, 0x62, 0x05, 0x7e, 0xcb, 0x76, 0x1b, 0x48, 0x70, 0xb8, 0x84, 0x6b, 0x76,
+ 0x82, 0xe6, 0x17, 0x4a, 0x54, 0xcc, 0x0e, 0x46, 0x82, 0x03, 0x8b, 0x20, 0xed, 0x5b, 0xc4, 0x93,
+ 0x19, 0xbc, 0x60, 0x2c, 0x70, 0x93, 0x6b, 0x9c, 0x80, 0x24, 0x1d, 0x5e, 0x00, 0x0b, 0x5c, 0xd0,
+ 0xf7, 0x4c, 0x0b, 0xe7, 0xd3, 0x42, 0x68, 0xa9, 0xdf, 0x2b, 0x2e, 0x54, 0x02, 0x22, 0x8a, 0xf8,
+ 0xfa, 0xb7, 0x03, 0xef, 0xc3, 0x9f, 0x0e, 0x6e, 0x02, 0x60, 0x11, 0x97, 0x51, 0xe2, 0x38, 0x38,
+ 0xe8, 0x46, 0x61, 0xd2, 0x6c, 0x85, 0x1c, 0x14, 0x93, 0x82, 0x36, 0x00, 0x5e, 0x18, 0x1b, 0x95,
+ 0x3c, 0x6f, 0x4c, 0x17, 0xfa, 0x11, 0x31, 0x35, 0x96, 0xb9, 0xaa, 0x18, 0x23, 0x06, 0xae, 0x7f,
+ 0xa7, 0x81, 0x9c, 0xba, 0x7f, 0x02, 0xe9, 0x74, 0x23, 0x99, 0x4e, 0xff, 0x9f, 0x3c, 0x5a, 0x46,
+ 0x67, 0xd2, 0x0f, 0x1a, 0x58, 0x0b, 0xac, 0x26, 0x66, 0xc3, 0x30, 0x1d, 0xd3, 0xb5, 0x30, 0x0d,
+ 0x3a, 0xf5, 0x1a, 0x48, 0xd9, 0x41, 0xfa, 0x00, 0x05, 0x90, 0xda, 0xad, 0xa2, 0x94, 0xed, 0xc1,
+ 0x8b, 0x20, 0xdb, 0x22, 0x3e, 0x13, 0x89, 0x21, 0x53, 0x27, 0x34, 0xf8, 0xa6, 0xa2, 0xa3, 0x50,
+ 0x02, 0x56, 0x41, 0xda, 0x23, 0x94, 0xf9, 0xf9, 0x39, 0x61, 0xf0, 0x85, 0x89, 0x06, 0x57, 0x09,
+ 0x65, 0xaa, 0x97, 0x46, 0x23, 0x8a, 0x23, 0x20, 0x09, 0xa4, 0x7f, 0x0a, 0xfe, 0x33, 0xc2, 0x72,
+ 0x79, 0x05, 0x7e, 0x0c, 0x32, 0xb6, 0x64, 0xaa, 0x89, 0x78, 0x75, 0xa2, 0xc2, 0x11, 0xfe, 0x47,
+ 0x83, 0x38, 0x18, 0xb8, 0x01, 0xaa, 0xfe, 0x8d, 0x06, 0x56, 0x87, 0x2c, 0x15, 0xbb, 0x04, 0xa1,
+ 0x4c, 0x44, 0x2c, 0x1d, 0xdb, 0x25, 0x08, 0x65, 0x48, 0x70, 0xe0, 0x2d, 0x90, 0x15, 0xab, 0x88,
+ 0x45, 0x1c, 0x15, 0xb5, 0x72, 0x10, 0xb5, 0xaa, 0xa2, 0x1f, 0xf7, 0x8a, 0xff, 0x1d, 0xde, 0xcf,
+ 0x4a, 0x01, 0x1b, 0x85, 0x00, 0xbc, 0xea, 0x30, 0xa5, 0x84, 0xaa, 0xc2, 0x14, 0x55, 0xb7, 0xcd,
+ 0x09, 0x48, 0xd2, 0xf5, 0xaf, 0xa3, 0xa4, 0xe4, 0xbb, 0x02, 0xb7, 0x8f, 0xbf, 0xc8, 0xe0, 0x2c,
+ 0xe7, 0xef, 0x85, 0x04, 0x07, 0x7a, 0x60, 0xc5, 0x1e, 0x58, 0x2e, 0xa6, 0x6e, 0xba, 0xe1, 0x0d,
+ 0x23, 0xaf, 0x90, 0x57, 0x06, 0x39, 0x68, 0x08, 0x5d, 0xbf, 0x0b, 0x86, 0xa4, 0x78, 0xbb, 0x6f,
+ 0x31, 0xe6, 0x8d, 0x28, 0x9c, 0xf1, 0xdb, 0x4c, 0xa4, 0x3d, 0x2b, 0x7c, 0xaa, 0xd7, 0xab, 0x48,
+ 0xa0, 0xe8, 0x5f, 0x6a, 0xe0, 0xcc, 0xc8, 0xc1, 0x19, 0x36, 0x36, 0x6d, 0x6c, 0x63, 0xab, 0xa8,
+ 0x17, 0x95, 0x31, 0xb8, 0x38, 0xde, 0x92, 0x24, 0x32, 0x7f, 0xf1, 0x51, 0xef, 0xaf, 0xff, 0x96,
+ 0x0a, 0x5f, 0x44, 0x74, 0xb5, 0xb7, 0xc3, 0x78, 0x8b, 0xae, 0xc3, 0x35, 0xab, 0x1e, 0x7a, 0x3a,
+ 0x16, 0xbf, 0x90, 0x87, 0x86, 0xa4, 0x61, 0x03, 0x2c, 0x37, 0xf0, 0xa1, 0xd9, 0x75, 0x98, 0xd2,
+ 0xad, 0xa2, 0x36, 0xfd, 0xba, 0x09, 0xfb, 0xbd, 0xe2, 0xf2, 0xf5, 0x04, 0x06, 0x1a, 0xc0, 0x84,
+ 0x5b, 0x60, 0x96, 0x39, 0x41, 0xbb, 0x79, 0x69, 0x22, 0x74, 0x7d, 0xaf, 0x66, 0xe4, 0x94, 0xfb,
+ 0xb3, 0xf5, 0xbd, 0x1a, 0xe2, 0xb7, 0xe1, 0x3b, 0x20, 0x4d, 0xbb, 0x0e, 0xe6, 0xcb, 0xd4, 0xec,
+ 0x54, 0x7b, 0x19, 0x7f, 0xd3, 0xa8, 0xfc, 0xf9, 0xc9, 0x47, 0x12, 0x42, 0xff, 0x0c, 0x2c, 0x25,
+ 0x36, 0x2e, 0xd8, 0x01, 0x8b, 0x4e, 0xac, 0x84, 0x55, 0x14, 0xae, 0xfc, 0xa3, 0xba, 0x57, 0x0d,
+ 0xe7, 0xb4, 0xd2, 0xb8, 0x18, 0xe7, 0xa1, 0x04, 0xbc, 0x6e, 0x02, 0x10, 0xf9, 0xca, 0x2b, 0x91,
+ 0x97, 0x8f, 0xec, 0x36, 0xaa, 0x12, 0x79, 0x55, 0xf9, 0x48, 0xd2, 0xf9, 0xf4, 0xf2, 0xb1, 0x45,
+ 0x31, 0xab, 0x44, 0xfd, 0x32, 0x9c, 0x5e, 0xb5, 0x90, 0x83, 0x62, 0x52, 0xfa, 0x2f, 0x1a, 0x58,
+ 0xaa, 0x48, 0x93, 0xab, 0xc4, 0xb1, 0xad, 0x07, 0x27, 0xb0, 0x68, 0xdd, 0x4e, 0x2c, 0x5a, 0xcf,
+ 0x68, 0xd3, 0x09, 0xc3, 0xc6, 0x6e, 0x5a, 0xdf, 0x6b, 0xe0, 0xdf, 0x09, 0xc9, 0xed, 0xa8, 0x19,
+ 0x85, 0x23, 0x41, 0x9b, 0x34, 0x12, 0x12, 0x08, 0xa2, 0xb4, 0x46, 0x8e, 0x04, 0xb8, 0x03, 0x52,
+ 0x8c, 0xa8, 0x1c, 0x9d, 0x1a, 0x0e, 0x63, 0x1a, 0xcd, 0xb6, 0x3a, 0x41, 0x29, 0x46, 0xf4, 0x1f,
+ 0x35, 0x90, 0x4f, 0x48, 0xc5, 0x9b, 0xe8, 0x8b, 0xb7, 0xfb, 0x36, 0x98, 0x3b, 0xa4, 0xa4, 0xf3,
+ 0x3c, 0x96, 0x87, 0x41, 0xbf, 0x41, 0x49, 0x07, 0x09, 0x18, 0xfd, 0xa1, 0x06, 0x56, 0x13, 0x92,
+ 0x27, 0xb0, 0x90, 0xec, 0x25, 0x17, 0x92, 0xf3, 0x53, 0xfa, 0x30, 0x66, 0x2d, 0x79, 0x98, 0x1a,
+ 0xf0, 0x80, 0xfb, 0x0a, 0x0f, 0x41, 0xce, 0x23, 0x8d, 0x1a, 0x76, 0xb0, 0xc5, 0xc8, 0xa8, 0x02,
+ 0x7f, 0x96, 0x13, 0xe6, 0x01, 0x76, 0x82, 0xab, 0xc6, 0xa9, 0x7e, 0xaf, 0x98, 0xab, 0x46, 0x58,
+ 0x28, 0x0e, 0x0c, 0xef, 0x83, 0xd5, 0x70, 0x17, 0x0d, 0xb5, 0xa5, 0x9e, 0x5f, 0xdb, 0x99, 0x7e,
+ 0xaf, 0xb8, 0x5a, 0x19, 0x44, 0x44, 0xc3, 0x4a, 0xe0, 0x4d, 0x90, 0xb1, 0x3d, 0xf1, 0xd9, 0xad,
+ 0xbe, 0xd8, 0x9e, 0xb5, 0xd8, 0xc9, 0xef, 0x73, 0xf9, 0xf1, 0xa7, 0x0e, 0x28, 0xb8, 0xae, 0xff,
+ 0x3e, 0x98, 0x03, 0x3c, 0xe1, 0xe0, 0x4e, 0x6c, 0xfb, 0x90, 0x33, 0xef, 0xc2, 0xf3, 0x6d, 0x1e,
+ 0xc9, 0xb1, 0x38, 0xbe, 0x09, 0x75, 0x99, 0xed, 0x94, 0xe4, 0x8f, 0x31, 0xa5, 0x5d, 0x97, 0xed,
+ 0xd3, 0x1a, 0xa3, 0xb6, 0xdb, 0x94, 0x23, 0x3a, 0xb6, 0x16, 0x9d, 0x05, 0x19, 0x35, 0x35, 0x85,
+ 0xe3, 0x69, 0xe9, 0xd5, 0xb6, 0x24, 0xa1, 0x80, 0xa7, 0x1f, 0x0f, 0xe6, 0x85, 0x98, 0xa1, 0xf7,
+ 0x5e, 0x58, 0x5e, 0xfc, 0x4b, 0x65, 0xe3, 0xf8, 0xdc, 0xf8, 0x30, 0x5a, 0x2c, 0x65, 0xa6, 0x6f,
+ 0x4e, 0x99, 0xe9, 0xf1, 0x89, 0x36, 0x76, 0xad, 0x84, 0xef, 0x81, 0x79, 0x2c, 0xd1, 0xe5, 0x88,
+ 0xbc, 0x3c, 0x25, 0x7a, 0xd4, 0x56, 0xa3, 0x5f, 0x1e, 0x14, 0x4d, 0x01, 0xc2, 0xb7, 0x78, 0x94,
+ 0xb8, 0x2c, 0xff, 0xe0, 0x97, 0x7b, 0xf8, 0x82, 0xf1, 0x3f, 0xe9, 0x6c, 0x48, 0x3e, 0xe6, 0x1f,
+ 0x38, 0xe1, 0x11, 0xc5, 0x6f, 0xe8, 0x1f, 0x01, 0x38, 0xbc, 0xe4, 0x4c, 0xb1, 0x42, 0x9d, 0x03,
+ 0xf3, 0x6e, 0xb7, 0x73, 0x80, 0x65, 0x0d, 0xa5, 0x23, 0x03, 0x2b, 0x82, 0x8a, 0x14, 0xd7, 0x78,
+ 0xf3, 0xd1, 0xd3, 0xc2, 0xcc, 0xe3, 0xa7, 0x85, 0x99, 0x27, 0x4f, 0x0b, 0x33, 0x9f, 0xf7, 0x0b,
+ 0xda, 0xa3, 0x7e, 0x41, 0x7b, 0xdc, 0x2f, 0x68, 0x4f, 0xfa, 0x05, 0xed, 0xcf, 0x7e, 0x41, 0xfb,
+ 0xea, 0xaf, 0xc2, 0xcc, 0xfb, 0xf9, 0x71, 0xbf, 0x96, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd4,
+ 0x46, 0x40, 0xf2, 0x61, 0x15, 0x00, 0x00,
}
func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) {
@@ -1822,16 +1790,6 @@ func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -2180,43 +2138,6 @@ func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *NetworkPolicyStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *NetworkPolicyStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *NetworkPolicyStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
func (m *ServiceBackendPort) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -2583,8 +2504,6 @@ func (m *NetworkPolicy) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -2717,21 +2636,6 @@ func (m *NetworkPolicySpec) Size() (n int) {
return n
}
-func (m *NetworkPolicyStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
func (m *ServiceBackendPort) Size() (n int) {
if m == nil {
return 0
@@ -3006,7 +2910,6 @@ func (this *NetworkPolicy) String() string {
s := strings.Join([]string{`&NetworkPolicy{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NetworkPolicyStatus", "NetworkPolicyStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@@ -3116,21 +3019,6 @@ func (this *NetworkPolicySpec) String() string {
}, "")
return s
}
-func (this *NetworkPolicyStatus) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForConditions := "[]Condition{"
- for _, f := range this.Conditions {
- repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
- }
- repeatedStringForConditions += "}"
- s := strings.Join([]string{`&NetworkPolicyStatus{`,
- `Conditions:` + repeatedStringForConditions + `,`,
- `}`,
- }, "")
- return s
-}
func (this *ServiceBackendPort) String() string {
if this == nil {
return "nil"
@@ -5609,39 +5497,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -6496,90 +6351,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *NetworkPolicyStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: NetworkPolicyStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: NetworkPolicyStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Conditions = append(m.Conditions, v1.Condition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
index ed194a89d5..b50dd491e0 100644
--- a/vendor/k8s.io/api/networking/v1/generated.proto
+++ b/vendor/k8s.io/api/networking/v1/generated.proto
@@ -384,11 +384,6 @@ message NetworkPolicy {
// spec represents the specification of the desired behavior for this NetworkPolicy.
// +optional
optional NetworkPolicySpec spec = 2;
-
- // status represents the current state of the NetworkPolicy.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- optional NetworkPolicyStatus status = 3;
}
// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
@@ -536,18 +531,6 @@ message NetworkPolicySpec {
repeated string policyTypes = 4;
}
-// NetworkPolicyStatus describes the current state of the NetworkPolicy.
-message NetworkPolicyStatus {
- // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy.
- // Current service state
- // +optional
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
-}
-
// ServiceBackendPort is the service port being referenced.
message ServiceBackendPort {
// name is the name of the port on the Service.
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
index fa7cf1bd70..a17e2cb5b3 100644
--- a/vendor/k8s.io/api/networking/v1/types.go
+++ b/vendor/k8s.io/api/networking/v1/types.go
@@ -38,10 +38,10 @@ type NetworkPolicy struct {
// +optional
Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
- // status represents the current state of the NetworkPolicy.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
- // +optional
- Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+ // Status is tombstoned to show why 3 is a reserved protobuf tag.
+ // This commented field should remain, so that if we decide to reimplement
+ // NetworkPolicyStatus in the future, a different protobuf name and tag SHOULD be used!
+ // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PolicyType string describes the NetworkPolicy type
@@ -205,48 +205,6 @@ type NetworkPolicyPeer struct {
IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"`
}
-// NetworkPolicyConditionType is the type for status conditions on
-// a NetworkPolicy. This type should be used with the
-// NetworkPolicyStatus.Conditions field.
-type NetworkPolicyConditionType string
-
-const (
- // NetworkPolicyConditionStatusAccepted represents status of a Network Policy that could be properly parsed by
- // the Network Policy provider and will be implemented in the cluster
- NetworkPolicyConditionStatusAccepted NetworkPolicyConditionType = "Accepted"
-
- // NetworkPolicyConditionStatusPartialFailure represents status of a Network Policy that could be partially
- // parsed by the Network Policy provider and may not be completely implemented due to a lack of a feature or some
- // other condition
- NetworkPolicyConditionStatusPartialFailure NetworkPolicyConditionType = "PartialFailure"
-
- // NetworkPolicyConditionStatusFailure represents status of a Network Policy that could not be parsed by the
- // Network Policy provider and will not be implemented in the cluster
- NetworkPolicyConditionStatusFailure NetworkPolicyConditionType = "Failure"
-)
-
-// NetworkPolicyConditionReason defines the set of reasons that explain why a
-// particular NetworkPolicy condition type has been raised.
-type NetworkPolicyConditionReason string
-
-const (
- // NetworkPolicyConditionReasonFeatureNotSupported represents a reason where the Network Policy may not have been
- // implemented in the cluster due to a lack of some feature not supported by the Network Policy provider
- NetworkPolicyConditionReasonFeatureNotSupported NetworkPolicyConditionReason = "FeatureNotSupported"
-)
-
-// NetworkPolicyStatus describes the current state of the NetworkPolicy.
-type NetworkPolicyStatus struct {
- // conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy.
- // Current service state
- // +optional
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-}
-
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicyList is a list of NetworkPolicy objects.
diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
index 91161d5ca4..ff080540d3 100644
--- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
@@ -224,7 +224,6 @@ var map_NetworkPolicy = map[string]string{
"": "NetworkPolicy describes what network traffic is allowed for a set of Pods",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "spec represents the specification of the desired behavior for this NetworkPolicy.",
- "status": "status represents the current state of the NetworkPolicy. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
}
func (NetworkPolicy) SwaggerDoc() map[string]string {
@@ -295,15 +294,6 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string {
return map_NetworkPolicySpec
}
-var map_NetworkPolicyStatus = map[string]string{
- "": "NetworkPolicyStatus describes the current state of the NetworkPolicy.",
- "conditions": "conditions holds an array of metav1.Condition that describe the state of the NetworkPolicy. Current service state",
-}
-
-func (NetworkPolicyStatus) SwaggerDoc() map[string]string {
- return map_NetworkPolicyStatus
-}
-
var map_ServiceBackendPort = map[string]string{
"": "ServiceBackendPort is the service port being referenced.",
"name": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".",
diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
index c95653c918..540873833f 100644
--- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
@@ -499,7 +499,6 @@ func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
return
}
@@ -712,29 +711,6 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkPolicyStatus) DeepCopyInto(out *NetworkPolicyStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]metav1.Condition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyStatus.
-func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus {
- if in == nil {
- return nil
- }
- out := new(NetworkPolicyStatus)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
*out = *in
diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto
index 222f2b9052..13ff60ea71 100644
--- a/vendor/k8s.io/api/rbac/v1/generated.proto
+++ b/vendor/k8s.io/api/rbac/v1/generated.proto
@@ -66,6 +66,7 @@ message ClusterRoleBinding {
// RoleRef can only reference a ClusterRole in the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
+ // This field is immutable.
optional RoleRef roleRef = 3;
}
@@ -140,6 +141,7 @@ message RoleBinding {
// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
+ // This field is immutable.
optional RoleRef roleRef = 3;
}
diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go
index 5a8e4a85c8..ce845d69b4 100644
--- a/vendor/k8s.io/api/rbac/v1/types.go
+++ b/vendor/k8s.io/api/rbac/v1/types.go
@@ -132,6 +132,7 @@ type RoleBinding struct {
// RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
+ // This field is immutable.
RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
}
@@ -209,6 +210,7 @@ type ClusterRoleBinding struct {
// RoleRef can only reference a ClusterRole in the global namespace.
// If the RoleRef cannot be resolved, the Authorizer must return an error.
+ // This field is immutable.
RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"`
}
diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
index 370398198b..0471a55944 100644
--- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
@@ -51,7 +51,7 @@ var map_ClusterRoleBinding = map[string]string{
"": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.",
"metadata": "Standard object's metadata.",
"subjects": "Subjects holds references to the objects the role applies to.",
- "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+ "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.",
}
func (ClusterRoleBinding) SwaggerDoc() map[string]string {
@@ -105,7 +105,7 @@ var map_RoleBinding = map[string]string{
"": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.",
"metadata": "Standard object's metadata.",
"subjects": "Subjects holds references to the objects the role applies to.",
- "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.",
+ "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.",
}
func (RoleBinding) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go
index 2bd5d52931..52c65ccbe3 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go
@@ -291,9 +291,11 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps {
}
if in.XValidations != nil {
- in, out := &in.XValidations, &out.XValidations
- *out = make([]ValidationRule, len(*in))
- copy(*out, *in)
+ inValidations, outValidations := &in.XValidations, &out.XValidations
+ *outValidations = make([]ValidationRule, len(*inValidations))
+ for i := range *inValidations {
+ in.XValidations[i].DeepCopyInto(&out.XValidations[i])
+ }
}
return out
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go
index 04ce206bb9..cc1c7437fc 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go
@@ -16,6 +16,26 @@ limitations under the License.
package apiextensions
+// FieldValueErrorReason is a machine-readable value providing more detail about why a field failed the validation.
+// +enum
+type FieldValueErrorReason string
+
+const (
+ // FieldValueRequired is used to report required values that are not
+ // provided (e.g. empty strings, null values, or empty arrays).
+ FieldValueRequired FieldValueErrorReason = "FieldValueRequired"
+ // FieldValueDuplicate is used to report collisions of values that must be
+ // unique (e.g. unique IDs).
+ FieldValueDuplicate FieldValueErrorReason = "FieldValueDuplicate"
+ // FieldValueInvalid is used to report malformed values (e.g. failed regex
+ // match, too long, out of bounds).
+ FieldValueInvalid FieldValueErrorReason = "FieldValueInvalid"
+ // FieldValueForbidden is used to report valid (as per formatting rules)
+ // values which would be accepted under some conditions, but which are not
+ // permitted by the current conditions (such as security policy).
+ FieldValueForbidden FieldValueErrorReason = "FieldValueForbidden"
+)
+
// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).
type JSONSchemaProps struct {
ID string
@@ -208,6 +228,24 @@ type ValidationRule struct {
// "x must be less than max ("+string(self.max)+")"
// +optional
MessageExpression string
+ // reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule.
+ // The HTTP status code returned to the caller will match the reason of the first failed validation rule.
+ // The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate".
+ // If not set, the default is "FieldValueInvalid".
+ // All future-added reasons must be accepted by clients when reading this value, and unknown reasons should be treated as FieldValueInvalid.
+ // +optional
+ Reason *FieldValueErrorReason
+ // fieldPath represents the field path returned when the validation fails.
+ // It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field.
+ // e.g. when validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`
+ // If the validation checks that two lists must have unique attributes, the fieldPath could be set to either of the lists: e.g. `.testList`
+ // It does not support numeric indexes into lists.
+ // It currently supports child operations that refer to an existing field. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info.
+ // Numeric indexes of arrays are likewise not supported.
+ // For a field name that contains special characters, use `['specialName']` to refer to the field name.
+ // e.g. for an attribute `foo.34$` that appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`
+ // +optional
+ FieldPath string
}
// JSON represents any valid JSON value.
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go
index 28dfb99f18..c548e642d4 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/deepcopy.go
@@ -251,9 +251,11 @@ func (in *JSONSchemaProps) DeepCopy() *JSONSchemaProps {
}
if in.XValidations != nil {
- in, out := &in.XValidations, &out.XValidations
- *out = make([]ValidationRule, len(*in))
- copy(*out, *in)
+ inValidations, outValidations := &in.XValidations, &out.XValidations
+ *outValidations = make([]ValidationRule, len(*inValidations))
+ for i := range *inValidations {
+ in.XValidations[i].DeepCopyInto(&out.XValidations[i])
+ }
}
return out
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go
index 5dbb38c8bf..75a573a2d2 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.pb.go
@@ -814,199 +814,202 @@ func init() {
}
var fileDescriptor_f5a35c9667703937 = []byte{
- // 3072 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdf, 0x6f, 0x24, 0x47,
- 0xf1, 0xbf, 0x59, 0xff, 0x5a, 0xb7, 0xed, 0xb3, 0xdd, 0x77, 0xf6, 0x77, 0xce, 0xb9, 0xf3, 0xfa,
- 0x36, 0xdf, 0x1c, 0x4e, 0x72, 0x59, 0x27, 0x26, 0x21, 0x47, 0x84, 0x40, 0x5e, 0xdb, 0x97, 0x38,
- 0x67, 0x9f, 0xad, 0xde, 0xbb, 0x8b, 0x93, 0x20, 0x92, 0xf1, 0x4e, 0x7b, 0x3d, 0xf1, 0xfc, 0xba,
- 0xee, 0x99, 0xb5, 0x2d, 0x81, 0x14, 0x81, 0x22, 0x20, 0x12, 0x84, 0x07, 0x14, 0x9e, 0x10, 0x42,
- 0x28, 0x48, 0xf0, 0x00, 0x6f, 0xf0, 0x2f, 0xe4, 0x05, 0x29, 0x4f, 0x28, 0x12, 0xd2, 0x8a, 0x2c,
- 0xff, 0x00, 0x12, 0x20, 0x84, 0x1f, 0x10, 0xea, 0x1f, 0xd3, 0xd3, 0x3b, 0xbb, 0x7b, 0x77, 0xb2,
- 0xd7, 0xc9, 0xdb, 0x6e, 0x55, 0x75, 0x7d, 0xaa, 0xab, 0xab, 0xab, 0xab, 0xab, 0x07, 0x58, 0xfb,
- 0x37, 0x68, 0xc9, 0x09, 0x16, 0xf6, 0xe3, 0x1d, 0x4c, 0x7c, 0x1c, 0x61, 0xba, 0x50, 0xc7, 0xbe,
- 0x1d, 0x90, 0x05, 0xc9, 0xb0, 0x42, 0x07, 0x1f, 0x46, 0xd8, 0xa7, 0x4e, 0xe0, 0xd3, 0x67, 0xac,
- 0xd0, 0xa1, 0x98, 0xd4, 0x31, 0x59, 0x08, 0xf7, 0x6b, 0x8c, 0x47, 0x5b, 0x05, 0x16, 0xea, 0xcf,
- 0x2d, 0xd4, 0xb0, 0x8f, 0x89, 0x15, 0x61, 0xbb, 0x14, 0x92, 0x20, 0x0a, 0xe0, 0x0d, 0xa1, 0xa9,
- 0xd4, 0x22, 0xf8, 0x96, 0xd2, 0x54, 0x0a, 0xf7, 0x6b, 0x8c, 0x47, 0x5b, 0x05, 0x4a, 0xf5, 0xe7,
- 0x66, 0x9e, 0xa9, 0x39, 0xd1, 0x5e, 0xbc, 0x53, 0xaa, 0x06, 0xde, 0x42, 0x2d, 0xa8, 0x05, 0x0b,
- 0x5c, 0xe1, 0x4e, 0xbc, 0xcb, 0xff, 0xf1, 0x3f, 0xfc, 0x97, 0x00, 0x9a, 0x79, 0x3e, 0x35, 0xd9,
- 0xb3, 0xaa, 0x7b, 0x8e, 0x8f, 0xc9, 0x51, 0x6a, 0xa7, 0x87, 0x23, 0xab, 0x83, 0x79, 0x33, 0x0b,
- 0xdd, 0x46, 0x91, 0xd8, 0x8f, 0x1c, 0x0f, 0xb7, 0x0d, 0xf8, 0xca, 0xc3, 0x06, 0xd0, 0xea, 0x1e,
- 0xf6, 0xac, 0xec, 0xb8, 0xe2, 0xb1, 0x01, 0x26, 0x97, 0x03, 0xbf, 0x8e, 0x09, 0x9b, 0x20, 0xc2,
- 0xf7, 0x63, 0x4c, 0x23, 0x58, 0x06, 0x7d, 0xb1, 0x63, 0x9b, 0xc6, 0x9c, 0x31, 0x3f, 0x5c, 0x7e,
- 0xf6, 0xe3, 0x46, 0xe1, 0x5c, 0xb3, 0x51, 0xe8, 0xbb, 0xbb, 0xb6, 0x72, 0xdc, 0x28, 0x5c, 0xed,
- 0x86, 0x14, 0x1d, 0x85, 0x98, 0x96, 0xee, 0xae, 0xad, 0x20, 0x36, 0x18, 0xbe, 0x0c, 0x26, 0x6d,
- 0x4c, 0x1d, 0x82, 0xed, 0xa5, 0xad, 0xb5, 0x7b, 0x42, 0xbf, 0x99, 0xe3, 0x1a, 0x2f, 0x49, 0x8d,
- 0x93, 0x2b, 0x59, 0x01, 0xd4, 0x3e, 0x06, 0x6e, 0x83, 0xa1, 0x60, 0xe7, 0x1d, 0x5c, 0x8d, 0xa8,
- 0xd9, 0x37, 0xd7, 0x37, 0x3f, 0xb2, 0xf8, 0x4c, 0x29, 0x5d, 0x3c, 0x65, 0x02, 0x5f, 0x31, 0x39,
- 0xd9, 0x12, 0xb2, 0x0e, 0x56, 0x93, 0x45, 0x2b, 0x8f, 0x4b, 0xb4, 0xa1, 0x4d, 0xa1, 0x05, 0x25,
- 0xea, 0x8a, 0xbf, 0xca, 0x01, 0xa8, 0x4f, 0x9e, 0x86, 0x81, 0x4f, 0x71, 0x4f, 0x66, 0x4f, 0xc1,
- 0x44, 0x95, 0x6b, 0x8e, 0xb0, 0x2d, 0x71, 0xcd, 0xdc, 0x49, 0xac, 0x37, 0x25, 0xfe, 0xc4, 0x72,
- 0x46, 0x1d, 0x6a, 0x03, 0x80, 0x77, 0xc0, 0x20, 0xc1, 0x34, 0x76, 0x23, 0xb3, 0x6f, 0xce, 0x98,
- 0x1f, 0x59, 0xbc, 0xde, 0x15, 0x8a, 0x87, 0x36, 0x0b, 0xbe, 0x52, 0xfd, 0xb9, 0x52, 0x25, 0xb2,
- 0xa2, 0x98, 0x96, 0xcf, 0x4b, 0xa4, 0x41, 0xc4, 0x75, 0x20, 0xa9, 0xab, 0xf8, 0x5f, 0x03, 0x4c,
- 0xe8, 0x5e, 0xaa, 0x3b, 0xf8, 0x00, 0x12, 0x30, 0x44, 0x44, 0xb0, 0x70, 0x3f, 0x8d, 0x2c, 0xde,
- 0x2a, 0x9d, 0x74, 0x47, 0x95, 0xda, 0xe2, 0xaf, 0x3c, 0xc2, 0x96, 0x4b, 0xfe, 0x41, 0x09, 0x10,
- 0xac, 0x83, 0x3c, 0x91, 0x6b, 0xc4, 0x03, 0x69, 0x64, 0x71, 0xbd, 0x37, 0xa0, 0x42, 0x67, 0x79,
- 0xb4, 0xd9, 0x28, 0xe4, 0x93, 0x7f, 0x48, 0x61, 0x15, 0x7f, 0x91, 0x03, 0xb3, 0xcb, 0x31, 0x8d,
- 0x02, 0x0f, 0x61, 0x1a, 0xc4, 0xa4, 0x8a, 0x97, 0x03, 0x37, 0xf6, 0xfc, 0x15, 0xbc, 0xeb, 0xf8,
- 0x4e, 0xc4, 0x62, 0x74, 0x0e, 0xf4, 0xfb, 0x96, 0x87, 0x65, 0xcc, 0x8c, 0x4a, 0x4f, 0xf6, 0xdf,
- 0xb6, 0x3c, 0x8c, 0x38, 0x87, 0x49, 0xb0, 0x10, 0x91, 0x3b, 0x40, 0x49, 0xdc, 0x39, 0x0a, 0x31,
- 0xe2, 0x1c, 0x78, 0x0d, 0x0c, 0xee, 0x06, 0xc4, 0xb3, 0xc4, 0xea, 0x0d, 0xa7, 0xeb, 0x71, 0x93,
- 0x53, 0x91, 0xe4, 0xc2, 0x17, 0xc0, 0x88, 0x8d, 0x69, 0x95, 0x38, 0x21, 0x83, 0x36, 0xfb, 0xb9,
- 0xf0, 0x05, 0x29, 0x3c, 0xb2, 0x92, 0xb2, 0x90, 0x2e, 0x07, 0xaf, 0x83, 0x7c, 0x48, 0x9c, 0x80,
- 0x38, 0xd1, 0x91, 0x39, 0x30, 0x67, 0xcc, 0x0f, 0x94, 0x27, 0xe4, 0x98, 0xfc, 0x96, 0xa4, 0x23,
- 0x25, 0xc1, 0xa4, 0xdf, 0xa1, 0x81, 0xbf, 0x65, 0x45, 0x7b, 0xe6, 0x20, 0x47, 0x50, 0xd2, 0xaf,
- 0x56, 0x36, 0x6f, 0x33, 0x3a, 0x52, 0x12, 0xc5, 0x3f, 0x1b, 0xc0, 0xcc, 0x7a, 0x28, 0x71, 0x2f,
- 0xbc, 0x09, 0xf2, 0x34, 0x62, 0x39, 0xa7, 0x76, 0x24, 0xfd, 0xf3, 0x54, 0xa2, 0xaa, 0x22, 0xe9,
- 0xc7, 0x8d, 0xc2, 0x74, 0x3a, 0x22, 0xa1, 0x72, 0xdf, 0xa8, 0xb1, 0x2c, 0xe4, 0x0e, 0xf0, 0xce,
- 0x5e, 0x10, 0xec, 0xcb, 0xd5, 0x3f, 0x45, 0xc8, 0xbd, 0x26, 0x14, 0xa5, 0x98, 0x22, 0xe4, 0x24,
- 0x19, 0x25, 0x40, 0xc5, 0xff, 0xe4, 0xb2, 0x13, 0xd3, 0x16, 0xfd, 0x6d, 0x90, 0x67, 0x5b, 0xc8,
- 0xb6, 0x22, 0x4b, 0x6e, 0x82, 0x67, 0x1f, 0x6d, 0xc3, 0x89, 0xfd, 0xba, 0x81, 0x23, 0xab, 0x0c,
- 0xa5, 0x2b, 0x40, 0x4a, 0x43, 0x4a, 0x2b, 0x3c, 0x04, 0xfd, 0x34, 0xc4, 0x55, 0x39, 0xdf, 0x7b,
- 0xa7, 0x88, 0xf6, 0x2e, 0x73, 0xa8, 0x84, 0xb8, 0x9a, 0x06, 0x23, 0xfb, 0x87, 0x38, 0x22, 0x7c,
- 0xd7, 0x00, 0x83, 0x94, 0xe7, 0x05, 0x99, 0x4b, 0xb6, 0xcf, 0x00, 0x3c, 0x93, 0x77, 0xc4, 0x7f,
- 0x24, 0x71, 0x8b, 0xff, 0xcc, 0x81, 0xab, 0xdd, 0x86, 0x2e, 0x07, 0xbe, 0x2d, 0x16, 0x61, 0x4d,
- 0xee, 0x2b, 0x11, 0x59, 0x2f, 0xe8, 0xfb, 0xea, 0xb8, 0x51, 0x78, 0xe2, 0xa1, 0x0a, 0xb4, 0x0d,
- 0xf8, 0x55, 0x35, 0x65, 0xb1, 0x49, 0xaf, 0xb6, 0x1a, 0x76, 0xdc, 0x28, 0x8c, 0xab, 0x61, 0xad,
- 0xb6, 0xc2, 0x3a, 0x80, 0xae, 0x45, 0xa3, 0x3b, 0xc4, 0xf2, 0xa9, 0x50, 0xeb, 0x78, 0x58, 0x7a,
- 0xee, 0xa9, 0x47, 0x0b, 0x0a, 0x36, 0xa2, 0x3c, 0x23, 0x21, 0xe1, 0x7a, 0x9b, 0x36, 0xd4, 0x01,
- 0x81, 0xe5, 0x0c, 0x82, 0x2d, 0xaa, 0xd2, 0x80, 0x96, 0xc3, 0x19, 0x15, 0x49, 0x2e, 0x7c, 0x12,
- 0x0c, 0x79, 0x98, 0x52, 0xab, 0x86, 0xf9, 0xde, 0x1f, 0x4e, 0x0f, 0xc5, 0x0d, 0x41, 0x46, 0x09,
- 0xbf, 0xf8, 0x2f, 0x03, 0x5c, 0xee, 0xe6, 0xb5, 0x75, 0x87, 0x46, 0xf0, 0x9b, 0x6d, 0x61, 0x5f,
- 0x7a, 0xb4, 0x19, 0xb2, 0xd1, 0x3c, 0xe8, 0x55, 0x2a, 0x49, 0x28, 0x5a, 0xc8, 0x1f, 0x80, 0x01,
- 0x27, 0xc2, 0x5e, 0x72, 0x5a, 0xa2, 0xde, 0x87, 0x5d, 0x79, 0x4c, 0xc2, 0x0f, 0xac, 0x31, 0x20,
- 0x24, 0xf0, 0x8a, 0x1f, 0xe5, 0xc0, 0x95, 0x6e, 0x43, 0x58, 0x1e, 0xa7, 0xcc, 0xd9, 0xa1, 0x1b,
- 0x13, 0xcb, 0x95, 0xc1, 0xa6, 0x9c, 0xbd, 0xc5, 0xa9, 0x48, 0x72, 0x59, 0xee, 0xa4, 0x8e, 0x5f,
- 0x8b, 0x5d, 0x8b, 0xc8, 0x48, 0x52, 0x13, 0xae, 0x48, 0x3a, 0x52, 0x12, 0xb0, 0x04, 0x00, 0xdd,
- 0x0b, 0x48, 0xc4, 0x31, 0x78, 0x85, 0x33, 0x5c, 0x3e, 0xcf, 0x32, 0x42, 0x45, 0x51, 0x91, 0x26,
- 0xc1, 0x0e, 0x92, 0x7d, 0xc7, 0xb7, 0xe5, 0x82, 0xab, 0xbd, 0x7b, 0xcb, 0xf1, 0x6d, 0xc4, 0x39,
- 0x0c, 0xdf, 0x75, 0x68, 0xc4, 0x28, 0x72, 0xb5, 0x5b, 0x1c, 0xce, 0x25, 0x95, 0x04, 0xc3, 0xaf,
- 0xb2, 0x04, 0x1b, 0x10, 0x07, 0x53, 0x73, 0x30, 0xc5, 0x5f, 0x56, 0x54, 0xa4, 0x49, 0x14, 0xff,
- 0xd2, 0xdf, 0x3d, 0x3e, 0x58, 0x02, 0x81, 0x8f, 0x83, 0x81, 0x1a, 0x09, 0xe2, 0x50, 0x7a, 0x49,
- 0x79, 0xfb, 0x65, 0x46, 0x44, 0x82, 0x07, 0xbf, 0x0d, 0x06, 0x7c, 0x39, 0x61, 0x16, 0x41, 0xaf,
- 0xf5, 0x7e, 0x99, 0xb9, 0xb7, 0x52, 0x74, 0xe1, 0x48, 0x01, 0x0a, 0x9f, 0x07, 0x03, 0xb4, 0x1a,
- 0x84, 0x58, 0x3a, 0x71, 0x36, 0x11, 0xaa, 0x30, 0xe2, 0x71, 0xa3, 0x30, 0x96, 0xa8, 0xe3, 0x04,
- 0x24, 0x84, 0xe1, 0xf7, 0x0d, 0x90, 0x97, 0xc7, 0x05, 0x35, 0x87, 0x78, 0x78, 0xbe, 0xde, 0x7b,
- 0xbb, 0x65, 0xd9, 0x9b, 0xae, 0x99, 0x24, 0x50, 0xa4, 0xc0, 0xe1, 0x77, 0x0d, 0x00, 0xaa, 0xea,
- 0xec, 0x32, 0x87, 0xb9, 0x0f, 0x7b, 0xb6, 0x55, 0xb4, 0x53, 0x51, 0x04, 0x42, 0x5a, 0x2a, 0x69,
- 0xa8, 0xb0, 0x02, 0xa6, 0x42, 0x82, 0xb9, 0xee, 0xbb, 0xfe, 0xbe, 0x1f, 0x1c, 0xf8, 0x37, 0x1d,
- 0xec, 0xda, 0xd4, 0x04, 0x73, 0xc6, 0x7c, 0xbe, 0x7c, 0x45, 0xda, 0x3f, 0xb5, 0xd5, 0x49, 0x08,
- 0x75, 0x1e, 0x5b, 0x7c, 0xaf, 0x2f, 0x5b, 0x6b, 0x65, 0xcf, 0x0b, 0xf8, 0x81, 0x98, 0xbc, 0xc8,
- 0xc3, 0xd4, 0x34, 0xf8, 0x42, 0xbc, 0xd9, 0xfb, 0x85, 0x50, 0xb9, 0x3e, 0x3d, 0xa4, 0x15, 0x89,
- 0x22, 0xcd, 0x04, 0xf8, 0x53, 0x03, 0x8c, 0x59, 0xd5, 0x2a, 0x0e, 0x23, 0x6c, 0x8b, 0x6d, 0x9c,
- 0x3b, 0xdb, 0xa8, 0x9e, 0x92, 0x06, 0x8d, 0x2d, 0xe9, 0xa8, 0xa8, 0xd5, 0x08, 0xf8, 0x12, 0x38,
- 0x4f, 0xa3, 0x80, 0x60, 0x3b, 0x89, 0x20, 0x99, 0x5d, 0x60, 0xb3, 0x51, 0x38, 0x5f, 0x69, 0xe1,
- 0xa0, 0x8c, 0x64, 0xf1, 0x93, 0x01, 0x50, 0x78, 0x48, 0x84, 0x3e, 0x42, 0xd1, 0x7b, 0x0d, 0x0c,
- 0xf2, 0x99, 0xda, 0xdc, 0x21, 0x79, 0xed, 0xa8, 0xe7, 0x54, 0x24, 0xb9, 0xec, 0x78, 0x62, 0xf8,
- 0xec, 0x78, 0xea, 0xe3, 0x82, 0xea, 0x78, 0xaa, 0x08, 0x32, 0x4a, 0xf8, 0x70, 0x11, 0x00, 0x1b,
- 0x87, 0x04, 0xb3, 0x8c, 0x64, 0x9b, 0x43, 0x5c, 0x5a, 0xad, 0xcf, 0x8a, 0xe2, 0x20, 0x4d, 0x0a,
- 0xde, 0x04, 0x30, 0xf9, 0xe7, 0x04, 0xfe, 0x6b, 0x16, 0xf1, 0x1d, 0xbf, 0x66, 0xe6, 0xb9, 0xd9,
- 0xd3, 0xec, 0xb4, 0x5d, 0x69, 0xe3, 0xa2, 0x0e, 0x23, 0x60, 0x1d, 0x0c, 0x8a, 0x6b, 0x34, 0xcf,
- 0x1b, 0x3d, 0xdc, 0x71, 0xf7, 0x2c, 0xd7, 0xb1, 0x39, 0x54, 0x19, 0x70, 0xf7, 0x70, 0x14, 0x24,
- 0xd1, 0xe0, 0xfb, 0x06, 0x18, 0xa5, 0xf1, 0x0e, 0x91, 0xd2, 0x94, 0x67, 0xf5, 0x91, 0xc5, 0x3b,
- 0xbd, 0x82, 0xaf, 0x68, 0xba, 0xcb, 0x13, 0xcd, 0x46, 0x61, 0x54, 0xa7, 0xa0, 0x16, 0x6c, 0xf8,
- 0x07, 0x03, 0x98, 0x96, 0x2d, 0x42, 0xdf, 0x72, 0xb7, 0x88, 0xe3, 0x47, 0x98, 0x88, 0x0b, 0x91,
- 0x38, 0x3e, 0x7a, 0x58, 0x2b, 0x66, 0xef, 0x59, 0xe5, 0x39, 0xb9, 0xd2, 0xe6, 0x52, 0x17, 0x0b,
- 0x50, 0x57, 0xdb, 0x8a, 0xff, 0x36, 0xb2, 0xa9, 0x45, 0x9b, 0x65, 0xa5, 0x6a, 0xb9, 0x18, 0xae,
- 0x80, 0x09, 0x56, 0xfd, 0x22, 0x1c, 0xba, 0x4e, 0xd5, 0xa2, 0xfc, 0xf6, 0x23, 0xa2, 0x5b, 0x5d,
- 0xc3, 0x2b, 0x19, 0x3e, 0x6a, 0x1b, 0x01, 0x5f, 0x05, 0x50, 0x94, 0x85, 0x2d, 0x7a, 0x44, 0x25,
- 0xa0, 0x0a, 0xbc, 0x4a, 0x9b, 0x04, 0xea, 0x30, 0x0a, 0x2e, 0x83, 0x49, 0xd7, 0xda, 0xc1, 0x6e,
- 0x05, 0xbb, 0xb8, 0x1a, 0x05, 0x84, 0xab, 0x12, 0xf7, 0xc3, 0xa9, 0x66, 0xa3, 0x30, 0xb9, 0x9e,
- 0x65, 0xa2, 0x76, 0xf9, 0xe2, 0xd5, 0xec, 0x5e, 0xd6, 0x27, 0x2e, 0x8a, 0xed, 0x0f, 0x73, 0x60,
- 0xa6, 0x7b, 0x50, 0xc0, 0xef, 0xa8, 0xd2, 0x58, 0x54, 0x7c, 0xaf, 0x9f, 0x41, 0xe8, 0xc9, 0xeb,
- 0x00, 0x68, 0xbf, 0x0a, 0xc0, 0x23, 0x76, 0x5e, 0x5b, 0x6e, 0x72, 0xed, 0xdf, 0x3e, 0x0b, 0x74,
- 0xa6, 0xbf, 0x3c, 0x2c, 0xaa, 0x00, 0xcb, 0xe5, 0x87, 0xbe, 0xe5, 0xe2, 0xe2, 0x47, 0x6d, 0x57,
- 0xdb, 0x74, 0xb3, 0xc2, 0x1f, 0x18, 0x60, 0x3c, 0x08, 0xb1, 0xbf, 0xb4, 0xb5, 0x76, 0xef, 0xcb,
- 0x62, 0xd3, 0x4a, 0x07, 0xad, 0x9d, 0xdc, 0x44, 0x76, 0xbf, 0x16, 0xba, 0xb6, 0x48, 0x10, 0xd2,
- 0xf2, 0x85, 0x66, 0xa3, 0x30, 0xbe, 0xd9, 0x8a, 0x82, 0xb2, 0xb0, 0x45, 0x0f, 0x4c, 0xad, 0x1e,
- 0x46, 0x98, 0xf8, 0x96, 0xbb, 0x12, 0x54, 0x63, 0x0f, 0xfb, 0x91, 0xb0, 0x31, 0xd3, 0x2e, 0x30,
- 0x1e, 0xb1, 0x5d, 0x70, 0x05, 0xf4, 0xc5, 0xc4, 0x95, 0x51, 0x3b, 0xa2, 0x9a, 0x60, 0x68, 0x1d,
- 0x31, 0x7a, 0xf1, 0x2a, 0xe8, 0x67, 0x76, 0xc2, 0x4b, 0xa0, 0x8f, 0x58, 0x07, 0x5c, 0xeb, 0x68,
- 0x79, 0x88, 0x89, 0x20, 0xeb, 0x00, 0x31, 0x5a, 0xf1, 0xef, 0x73, 0x60, 0x3c, 0x33, 0x17, 0x38,
- 0x03, 0x72, 0xaa, 0xb3, 0x06, 0xa4, 0xd2, 0xdc, 0xda, 0x0a, 0xca, 0x39, 0x36, 0x7c, 0x51, 0x65,
- 0x57, 0x01, 0x5a, 0x50, 0x87, 0x05, 0xa7, 0xb2, 0xb2, 0x2c, 0x55, 0xc7, 0x0c, 0x49, 0xd2, 0x23,
- 0xb3, 0x01, 0xef, 0xca, 0x5d, 0x21, 0x6c, 0xc0, 0xbb, 0x88, 0xd1, 0x4e, 0xda, 0x2b, 0x49, 0x9a,
- 0x35, 0x03, 0x8f, 0xd0, 0xac, 0x19, 0x7c, 0x60, 0xb3, 0xe6, 0x71, 0x30, 0x10, 0x39, 0x91, 0x8b,
- 0xf9, 0x49, 0xa5, 0x15, 0xc3, 0x77, 0x18, 0x11, 0x09, 0x1e, 0xc4, 0x60, 0xc8, 0xc6, 0xbb, 0x56,
- 0xec, 0x46, 0xfc, 0x50, 0x1a, 0x59, 0xfc, 0xfa, 0xe9, 0xa2, 0x47, 0x34, 0x33, 0x56, 0x84, 0x4a,
- 0x94, 0xe8, 0x86, 0x4f, 0x80, 0x21, 0xcf, 0x3a, 0x74, 0xbc, 0xd8, 0xe3, 0x15, 0xa3, 0x21, 0xc4,
- 0x36, 0x04, 0x09, 0x25, 0x3c, 0x96, 0x04, 0xf1, 0x61, 0xd5, 0x8d, 0xa9, 0x53, 0xc7, 0x92, 0x29,
- 0x4b, 0x3a, 0x95, 0x04, 0x57, 0x33, 0x7c, 0xd4, 0x36, 0x82, 0x83, 0x39, 0x3e, 0x1f, 0x3c, 0xa2,
- 0x81, 0x09, 0x12, 0x4a, 0x78, 0xad, 0x60, 0x52, 0x7e, 0xb4, 0x1b, 0x98, 0x1c, 0xdc, 0x36, 0x02,
- 0x3e, 0x0d, 0x86, 0x3d, 0xeb, 0x70, 0x1d, 0xfb, 0xb5, 0x68, 0xcf, 0x1c, 0x9b, 0x33, 0xe6, 0xfb,
- 0xca, 0x63, 0xcd, 0x46, 0x61, 0x78, 0x23, 0x21, 0xa2, 0x94, 0xcf, 0x85, 0x1d, 0x5f, 0x0a, 0x9f,
- 0xd7, 0x84, 0x13, 0x22, 0x4a, 0xf9, 0xac, 0x32, 0x09, 0xad, 0x88, 0xed, 0x2b, 0x73, 0xbc, 0xf5,
- 0xe2, 0xbc, 0x25, 0xc8, 0x28, 0xe1, 0xc3, 0x79, 0x90, 0xf7, 0xac, 0x43, 0x7e, 0xa7, 0x34, 0x27,
- 0xb8, 0x5a, 0xde, 0x50, 0xdc, 0x90, 0x34, 0xa4, 0xb8, 0x5c, 0xd2, 0xf1, 0x85, 0xe4, 0xa4, 0x26,
- 0x29, 0x69, 0x48, 0x71, 0x59, 0xfc, 0xc6, 0xbe, 0x73, 0x3f, 0xc6, 0x42, 0x18, 0x72, 0xcf, 0xa8,
- 0xf8, 0xbd, 0x9b, 0xb2, 0x90, 0x2e, 0xc7, 0xee, 0x74, 0x5e, 0xec, 0x46, 0x4e, 0xe8, 0xe2, 0xcd,
- 0x5d, 0xf3, 0x02, 0xf7, 0x3f, 0x2f, 0xe5, 0x37, 0x14, 0x15, 0x69, 0x12, 0xf0, 0x6d, 0xd0, 0x8f,
- 0xfd, 0xd8, 0x33, 0x2f, 0xf2, 0xe3, 0xfb, 0xb4, 0xd1, 0xa7, 0xf6, 0xcb, 0xaa, 0x1f, 0x7b, 0x88,
- 0x6b, 0x86, 0x2f, 0x82, 0x31, 0xcf, 0x3a, 0x64, 0x49, 0x00, 0x93, 0x88, 0x5d, 0x34, 0xa7, 0xf8,
- 0xbc, 0x27, 0x59, 0x11, 0xbb, 0xa1, 0x33, 0x50, 0xab, 0x1c, 0x1f, 0xe8, 0xf8, 0xda, 0xc0, 0x69,
- 0x6d, 0xa0, 0xce, 0x40, 0xad, 0x72, 0xcc, 0xc9, 0x04, 0xdf, 0x8f, 0x1d, 0x82, 0x6d, 0xf3, 0xff,
- 0x78, 0xdd, 0x2b, 0xfb, 0xbb, 0x82, 0x86, 0x14, 0x17, 0xde, 0x4f, 0x5a, 0x0e, 0x26, 0xdf, 0x7c,
- 0x5b, 0x3d, 0x4b, 0xdd, 0x9b, 0x64, 0x89, 0x10, 0xeb, 0x48, 0x9c, 0x2a, 0x7a, 0xb3, 0x01, 0xfa,
- 0x60, 0xc0, 0x72, 0xdd, 0xcd, 0x5d, 0xf3, 0x12, 0xf7, 0x78, 0x0f, 0x4f, 0x0b, 0x95, 0x61, 0x96,
- 0x98, 0x7e, 0x24, 0x60, 0x18, 0x5e, 0xe0, 0xb3, 0x58, 0x98, 0x39, 0x33, 0xbc, 0x4d, 0xa6, 0x1f,
- 0x09, 0x18, 0x3e, 0x3f, 0xff, 0x68, 0x73, 0xd7, 0x7c, 0xec, 0xec, 0xe6, 0xc7, 0xf4, 0x23, 0x01,
- 0x03, 0x6d, 0xd0, 0xe7, 0x07, 0x91, 0x79, 0xb9, 0xd7, 0x67, 0x2f, 0x3f, 0x4d, 0x6e, 0x07, 0x11,
- 0x62, 0xea, 0xe1, 0x8f, 0x0c, 0x00, 0xc2, 0x34, 0x12, 0xaf, 0x9c, 0xb6, 0x05, 0x90, 0x41, 0x2b,
- 0xa5, 0xd1, 0xbb, 0xea, 0x47, 0xe4, 0x28, 0xbd, 0xd7, 0x68, 0x51, 0xae, 0x19, 0x00, 0x7f, 0x6e,
- 0x80, 0x8b, 0x7a, 0xb9, 0xab, 0x2c, 0x9b, 0xe5, 0x7e, 0xd8, 0xec, 0x61, 0x20, 0x97, 0x83, 0xc0,
- 0x2d, 0x9b, 0xcd, 0x46, 0xe1, 0xe2, 0x52, 0x07, 0x40, 0xd4, 0xd1, 0x0c, 0xf8, 0x1b, 0x03, 0x4c,
- 0xca, 0xec, 0xa8, 0x19, 0x57, 0xe0, 0x6e, 0x7b, 0xbb, 0x87, 0x6e, 0xcb, 0x42, 0x08, 0xef, 0xa9,
- 0x57, 0xc6, 0x36, 0x3e, 0x6a, 0xb7, 0x0a, 0xfe, 0xde, 0x00, 0xa3, 0x36, 0x0e, 0xb1, 0x6f, 0x63,
- 0xbf, 0xca, 0xcc, 0x9c, 0x3b, 0x6d, 0x5f, 0x21, 0x6b, 0xe6, 0x8a, 0xa6, 0x5d, 0x58, 0x58, 0x92,
- 0x16, 0x8e, 0xea, 0xac, 0xe3, 0x46, 0x61, 0x3a, 0x1d, 0xaa, 0x73, 0x50, 0x8b, 0x81, 0xf0, 0xc7,
- 0x06, 0x18, 0x4f, 0xdd, 0x2e, 0x0e, 0x88, 0xab, 0x67, 0xb3, 0xf0, 0xbc, 0x04, 0x5d, 0x6a, 0xc5,
- 0x42, 0x59, 0x70, 0xf8, 0x5b, 0x83, 0x55, 0x5b, 0xc9, 0x5d, 0x8d, 0x9a, 0x45, 0xee, 0xc1, 0x37,
- 0x7a, 0xe9, 0x41, 0xa5, 0x5c, 0x38, 0xf0, 0x7a, 0x5a, 0xc9, 0x29, 0xce, 0x71, 0xa3, 0x30, 0xa5,
- 0xfb, 0x4f, 0x31, 0x90, 0x6e, 0x1c, 0x7c, 0xcf, 0x00, 0xa3, 0x38, 0x2d, 0x98, 0xa9, 0xf9, 0xf8,
- 0x69, 0x5d, 0xd7, 0xb1, 0xfc, 0x16, 0xd7, 0x69, 0x8d, 0x45, 0x51, 0x0b, 0x2c, 0xab, 0xfd, 0xf0,
- 0xa1, 0xe5, 0x85, 0x2e, 0x36, 0xff, 0xbf, 0x77, 0xb5, 0xdf, 0xaa, 0x50, 0x89, 0x12, 0xdd, 0xf0,
- 0x3a, 0xc8, 0xfb, 0xb1, 0xeb, 0x5a, 0x3b, 0x2e, 0x36, 0x9f, 0xe0, 0x55, 0x84, 0xea, 0x2f, 0xde,
- 0x96, 0x74, 0xa4, 0x24, 0xe0, 0x2e, 0x98, 0x3b, 0xbc, 0xa5, 0x3e, 0xbe, 0xe8, 0xd8, 0xc0, 0x33,
- 0xaf, 0x71, 0x2d, 0x33, 0xcd, 0x46, 0x61, 0x7a, 0xbb, 0x73, 0x8b, 0xef, 0xa1, 0x3a, 0xe0, 0x9b,
- 0xe0, 0x31, 0x4d, 0x66, 0xd5, 0xdb, 0xc1, 0xb6, 0x8d, 0xed, 0xe4, 0xa2, 0x65, 0x7e, 0x89, 0x43,
- 0xa8, 0x7d, 0xbc, 0x9d, 0x15, 0x40, 0x0f, 0x1a, 0x0d, 0xd7, 0xc1, 0xb4, 0xc6, 0x5e, 0xf3, 0xa3,
- 0x4d, 0x52, 0x89, 0x88, 0xe3, 0xd7, 0xcc, 0x79, 0xae, 0xf7, 0x62, 0xb2, 0xfb, 0xb6, 0x35, 0x1e,
- 0xea, 0x32, 0x06, 0xbe, 0xd2, 0xa2, 0x8d, 0x3f, 0x5c, 0x58, 0xe1, 0x2d, 0x7c, 0x44, 0xcd, 0x27,
- 0x79, 0x71, 0xc1, 0xd7, 0x79, 0x5b, 0xa3, 0xa3, 0x2e, 0xf2, 0xf0, 0x1b, 0xe0, 0x42, 0x86, 0xc3,
- 0xee, 0x15, 0xe6, 0x53, 0xe2, 0x82, 0xc0, 0x2a, 0xd1, 0xed, 0x84, 0x88, 0x3a, 0x49, 0xc2, 0xaf,
- 0x01, 0xa8, 0x91, 0x37, 0xac, 0x90, 0x8f, 0x7f, 0x5a, 0xdc, 0x55, 0xd8, 0x8a, 0x6e, 0x4b, 0x1a,
- 0xea, 0x20, 0x07, 0x3f, 0x34, 0x5a, 0x66, 0x92, 0xde, 0x66, 0xa9, 0x79, 0x9d, 0x6f, 0xd8, 0x57,
- 0x4e, 0x1e, 0x80, 0xa9, 0x32, 0x14, 0xbb, 0x58, 0xf3, 0xb0, 0x86, 0x82, 0xba, 0xa0, 0xcf, 0xb0,
- 0xcb, 0x74, 0x26, 0x87, 0xc3, 0x09, 0xd0, 0xb7, 0x8f, 0xe5, 0xb3, 0x31, 0x62, 0x3f, 0xe1, 0x5b,
- 0x60, 0xa0, 0x6e, 0xb9, 0x71, 0xd2, 0x0a, 0xe8, 0xdd, 0x59, 0x8f, 0x84, 0xde, 0x97, 0x72, 0x37,
- 0x8c, 0x99, 0x0f, 0x0c, 0x30, 0xdd, 0xf9, 0x54, 0xf9, 0xa2, 0x2c, 0xfa, 0x99, 0x01, 0x26, 0xdb,
- 0x0e, 0x90, 0x0e, 0xc6, 0xb8, 0xad, 0xc6, 0xdc, 0xeb, 0xe1, 0x49, 0x20, 0x36, 0x02, 0xaf, 0x68,
- 0x75, 0xcb, 0x7e, 0x68, 0x80, 0x89, 0x6c, 0x62, 0xfe, 0x82, 0xbc, 0x54, 0x7c, 0x3f, 0x07, 0xa6,
- 0x3b, 0xd7, 0xe0, 0xd0, 0x53, 0xdd, 0x85, 0x9e, 0x37, 0x68, 0x3a, 0xb5, 0x6c, 0xdf, 0x35, 0xc0,
- 0xc8, 0x3b, 0x4a, 0x2e, 0x79, 0xcd, 0xec, 0x65, 0x57, 0x28, 0x39, 0xfa, 0x52, 0x06, 0x45, 0x3a,
- 0x64, 0xf1, 0x77, 0x06, 0x98, 0xea, 0x78, 0x9c, 0xc3, 0x6b, 0x60, 0xd0, 0x72, 0xdd, 0xe0, 0x40,
- 0x74, 0xf3, 0xb4, 0xb6, 0xfc, 0x12, 0xa7, 0x22, 0xc9, 0xd5, 0x7c, 0x96, 0xfb, 0x1c, 0x7c, 0x56,
- 0xfc, 0xa3, 0x01, 0x2e, 0x3f, 0x28, 0xea, 0x3e, 0xef, 0x35, 0x9c, 0x07, 0x79, 0x59, 0x6c, 0x1f,
- 0xf1, 0xf5, 0x93, 0xd9, 0x55, 0x66, 0x04, 0xfe, 0xb5, 0x8c, 0xf8, 0x55, 0xfc, 0xa5, 0x01, 0x26,
- 0x2a, 0x98, 0xd4, 0x9d, 0x2a, 0x46, 0x78, 0x17, 0x13, 0xec, 0x57, 0x31, 0x5c, 0x00, 0xc3, 0xfc,
- 0xb5, 0x31, 0xb4, 0xaa, 0xc9, 0x1b, 0xc9, 0xa4, 0x74, 0xf4, 0xf0, 0xed, 0x84, 0x81, 0x52, 0x19,
- 0xf5, 0x9e, 0x92, 0xeb, 0xfa, 0x9e, 0x72, 0x19, 0xf4, 0x87, 0x69, 0x03, 0x38, 0xcf, 0xb8, 0xbc,
- 0xe7, 0xcb, 0xa9, 0x9c, 0x1b, 0x90, 0x88, 0x77, 0xb9, 0x06, 0x24, 0x37, 0x20, 0x11, 0xe2, 0xd4,
- 0xe2, 0xaf, 0x0d, 0x70, 0xbe, 0x35, 0x3f, 0x33, 0x40, 0x12, 0xbb, 0x6d, 0x0f, 0x38, 0x8c, 0x87,
- 0x38, 0x47, 0xff, 0x6e, 0x20, 0xf7, 0xe0, 0xef, 0x06, 0xe0, 0xcb, 0x60, 0x52, 0xfe, 0x5c, 0x3d,
- 0x0c, 0x09, 0xa6, 0xfc, 0x65, 0xb2, 0xaf, 0xf5, 0x7b, 0xbf, 0x8d, 0xac, 0x00, 0x6a, 0x1f, 0x53,
- 0xfc, 0x93, 0x01, 0x2e, 0x24, 0xdf, 0xe7, 0xb8, 0x0e, 0xf6, 0xa3, 0xe5, 0xc0, 0xdf, 0x75, 0x6a,
- 0xf0, 0x92, 0xe8, 0x48, 0x6a, 0x6d, 0xbe, 0xa4, 0x1b, 0x09, 0xef, 0x83, 0x21, 0x2a, 0xdc, 0x2f,
- 0x23, 0xe3, 0xd5, 0x93, 0x47, 0x46, 0x76, 0x1d, 0x45, 0x41, 0x95, 0x50, 0x13, 0x1c, 0x16, 0x1c,
- 0x55, 0xab, 0x1c, 0xfb, 0xb6, 0xec, 0x4a, 0x8f, 0x8a, 0xe0, 0x58, 0x5e, 0x12, 0x34, 0xa4, 0xb8,
- 0xc5, 0x7f, 0x18, 0x60, 0xb2, 0xed, 0x7b, 0x23, 0xf8, 0x3d, 0x03, 0x8c, 0x56, 0xb5, 0xe9, 0xc9,
- 0x2d, 0xb6, 0x71, 0xfa, 0x6f, 0x9a, 0x34, 0xa5, 0xa2, 0x2a, 0xd1, 0x29, 0xa8, 0x05, 0x14, 0x6e,
- 0x03, 0xb3, 0x9a, 0xf9, 0xb4, 0x2f, 0xf3, 0x58, 0x78, 0xb9, 0xd9, 0x28, 0x98, 0xcb, 0x5d, 0x64,
- 0x50, 0xd7, 0xd1, 0xe5, 0x6f, 0x7d, 0xfc, 0xd9, 0xec, 0xb9, 0x4f, 0x3e, 0x9b, 0x3d, 0xf7, 0xe9,
- 0x67, 0xb3, 0xe7, 0xde, 0x6d, 0xce, 0x1a, 0x1f, 0x37, 0x67, 0x8d, 0x4f, 0x9a, 0xb3, 0xc6, 0xa7,
- 0xcd, 0x59, 0xe3, 0xaf, 0xcd, 0x59, 0xe3, 0x27, 0x7f, 0x9b, 0x3d, 0xf7, 0xc6, 0x8d, 0x93, 0x7e,
- 0xd0, 0xfb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x41, 0x66, 0xfd, 0x82, 0x24, 0x2c, 0x00, 0x00,
+ // 3111 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0xdd, 0x6f, 0x5c, 0x47,
+ 0x15, 0xcf, 0x5d, 0x7b, 0xed, 0xf5, 0xd8, 0x89, 0xed, 0x49, 0x6c, 0x6e, 0xdc, 0xc4, 0xeb, 0x6c,
+ 0x68, 0x70, 0xdb, 0x74, 0xdd, 0x9a, 0x96, 0x86, 0x82, 0x40, 0x5e, 0xdb, 0x69, 0xdd, 0xd8, 0xb1,
+ 0x35, 0x9b, 0xa4, 0x6e, 0x8b, 0x68, 0xaf, 0xf7, 0x8e, 0xd7, 0xb7, 0xbe, 0x5f, 0x99, 0xb9, 0xd7,
+ 0x1f, 0x12, 0x48, 0x15, 0xa8, 0x02, 0x2a, 0x41, 0x79, 0xa8, 0xca, 0x13, 0x42, 0x08, 0xf5, 0x01,
+ 0x1e, 0xe0, 0x0d, 0xfe, 0x85, 0xbe, 0x20, 0xf5, 0x09, 0x55, 0x42, 0x5a, 0xd1, 0xe5, 0x1f, 0x40,
+ 0x02, 0x84, 0xf0, 0x03, 0x42, 0xf3, 0x71, 0xe7, 0xce, 0xde, 0xdd, 0x4d, 0x22, 0x7b, 0xdd, 0xbe,
+ 0xed, 0x9e, 0x73, 0xe6, 0xfc, 0xce, 0x9c, 0x39, 0x73, 0xe6, 0xcc, 0xb9, 0x03, 0xac, 0xdd, 0x1b,
+ 0xb4, 0xec, 0x04, 0x73, 0xbb, 0xf1, 0x16, 0x26, 0x3e, 0x8e, 0x30, 0x9d, 0xdb, 0xc3, 0xbe, 0x1d,
+ 0x90, 0x39, 0xc9, 0xb0, 0x42, 0x07, 0x1f, 0x44, 0xd8, 0xa7, 0x4e, 0xe0, 0xd3, 0xa7, 0xad, 0xd0,
+ 0xa1, 0x98, 0xec, 0x61, 0x32, 0x17, 0xee, 0xd6, 0x19, 0x8f, 0xb6, 0x0a, 0xcc, 0xed, 0x3d, 0x3b,
+ 0x57, 0xc7, 0x3e, 0x26, 0x56, 0x84, 0xed, 0x72, 0x48, 0x82, 0x28, 0x80, 0x37, 0x84, 0xa6, 0x72,
+ 0x8b, 0xe0, 0x9b, 0x4a, 0x53, 0x39, 0xdc, 0xad, 0x33, 0x1e, 0x6d, 0x15, 0x28, 0xef, 0x3d, 0x3b,
+ 0xf5, 0x74, 0xdd, 0x89, 0x76, 0xe2, 0xad, 0x72, 0x2d, 0xf0, 0xe6, 0xea, 0x41, 0x3d, 0x98, 0xe3,
+ 0x0a, 0xb7, 0xe2, 0x6d, 0xfe, 0x8f, 0xff, 0xe1, 0xbf, 0x04, 0xd0, 0xd4, 0x73, 0xa9, 0xc9, 0x9e,
+ 0x55, 0xdb, 0x71, 0x7c, 0x4c, 0x0e, 0x53, 0x3b, 0x3d, 0x1c, 0x59, 0x1d, 0xcc, 0x9b, 0x9a, 0xeb,
+ 0x36, 0x8a, 0xc4, 0x7e, 0xe4, 0x78, 0xb8, 0x6d, 0xc0, 0xd7, 0x1e, 0x36, 0x80, 0xd6, 0x76, 0xb0,
+ 0x67, 0x65, 0xc7, 0x95, 0x8e, 0x0c, 0x30, 0xbe, 0x18, 0xf8, 0x7b, 0x98, 0xb0, 0x09, 0x22, 0x7c,
+ 0x3f, 0xc6, 0x34, 0x82, 0x15, 0xd0, 0x17, 0x3b, 0xb6, 0x69, 0xcc, 0x18, 0xb3, 0x43, 0x95, 0x67,
+ 0x3e, 0x6e, 0x14, 0xcf, 0x34, 0x1b, 0xc5, 0xbe, 0xbb, 0x2b, 0x4b, 0x47, 0x8d, 0xe2, 0x95, 0x6e,
+ 0x48, 0xd1, 0x61, 0x88, 0x69, 0xf9, 0xee, 0xca, 0x12, 0x62, 0x83, 0xe1, 0x4b, 0x60, 0xdc, 0xc6,
+ 0xd4, 0x21, 0xd8, 0x5e, 0xd8, 0x58, 0xb9, 0x27, 0xf4, 0x9b, 0x39, 0xae, 0xf1, 0xa2, 0xd4, 0x38,
+ 0xbe, 0x94, 0x15, 0x40, 0xed, 0x63, 0xe0, 0x26, 0x18, 0x0c, 0xb6, 0xde, 0xc6, 0xb5, 0x88, 0x9a,
+ 0x7d, 0x33, 0x7d, 0xb3, 0xc3, 0xf3, 0x4f, 0x97, 0xd3, 0xc5, 0x53, 0x26, 0xf0, 0x15, 0x93, 0x93,
+ 0x2d, 0x23, 0x6b, 0x7f, 0x39, 0x59, 0xb4, 0xca, 0xa8, 0x44, 0x1b, 0x5c, 0x17, 0x5a, 0x50, 0xa2,
+ 0xae, 0xf4, 0x9b, 0x1c, 0x80, 0xfa, 0xe4, 0x69, 0x18, 0xf8, 0x14, 0xf7, 0x64, 0xf6, 0x14, 0x8c,
+ 0xd5, 0xb8, 0xe6, 0x08, 0xdb, 0x12, 0xd7, 0xcc, 0x1d, 0xc7, 0x7a, 0x53, 0xe2, 0x8f, 0x2d, 0x66,
+ 0xd4, 0xa1, 0x36, 0x00, 0x78, 0x07, 0x0c, 0x10, 0x4c, 0x63, 0x37, 0x32, 0xfb, 0x66, 0x8c, 0xd9,
+ 0xe1, 0xf9, 0xeb, 0x5d, 0xa1, 0x78, 0x68, 0xb3, 0xe0, 0x2b, 0xef, 0x3d, 0x5b, 0xae, 0x46, 0x56,
+ 0x14, 0xd3, 0xca, 0x39, 0x89, 0x34, 0x80, 0xb8, 0x0e, 0x24, 0x75, 0x95, 0xfe, 0x67, 0x80, 0x31,
+ 0xdd, 0x4b, 0x7b, 0x0e, 0xde, 0x87, 0x04, 0x0c, 0x12, 0x11, 0x2c, 0xdc, 0x4f, 0xc3, 0xf3, 0xb7,
+ 0xca, 0xc7, 0xdd, 0x51, 0xe5, 0xb6, 0xf8, 0xab, 0x0c, 0xb3, 0xe5, 0x92, 0x7f, 0x50, 0x02, 0x04,
+ 0xf7, 0x40, 0x81, 0xc8, 0x35, 0xe2, 0x81, 0x34, 0x3c, 0xbf, 0xda, 0x1b, 0x50, 0xa1, 0xb3, 0x32,
+ 0xd2, 0x6c, 0x14, 0x0b, 0xc9, 0x3f, 0xa4, 0xb0, 0x4a, 0xbf, 0xca, 0x81, 0xe9, 0xc5, 0x98, 0x46,
+ 0x81, 0x87, 0x30, 0x0d, 0x62, 0x52, 0xc3, 0x8b, 0x81, 0x1b, 0x7b, 0xfe, 0x12, 0xde, 0x76, 0x7c,
+ 0x27, 0x62, 0x31, 0x3a, 0x03, 0xfa, 0x7d, 0xcb, 0xc3, 0x32, 0x66, 0x46, 0xa4, 0x27, 0xfb, 0x6f,
+ 0x5b, 0x1e, 0x46, 0x9c, 0xc3, 0x24, 0x58, 0x88, 0xc8, 0x1d, 0xa0, 0x24, 0xee, 0x1c, 0x86, 0x18,
+ 0x71, 0x0e, 0xbc, 0x06, 0x06, 0xb6, 0x03, 0xe2, 0x59, 0x62, 0xf5, 0x86, 0xd2, 0xf5, 0xb8, 0xc9,
+ 0xa9, 0x48, 0x72, 0xe1, 0xf3, 0x60, 0xd8, 0xc6, 0xb4, 0x46, 0x9c, 0x90, 0x41, 0x9b, 0xfd, 0x5c,
+ 0xf8, 0xbc, 0x14, 0x1e, 0x5e, 0x4a, 0x59, 0x48, 0x97, 0x83, 0xd7, 0x41, 0x21, 0x24, 0x4e, 0x40,
+ 0x9c, 0xe8, 0xd0, 0xcc, 0xcf, 0x18, 0xb3, 0xf9, 0xca, 0x98, 0x1c, 0x53, 0xd8, 0x90, 0x74, 0xa4,
+ 0x24, 0x98, 0xf4, 0xdb, 0x34, 0xf0, 0x37, 0xac, 0x68, 0xc7, 0x1c, 0xe0, 0x08, 0x4a, 0xfa, 0x95,
+ 0xea, 0xfa, 0x6d, 0x46, 0x47, 0x4a, 0xa2, 0xf4, 0x17, 0x03, 0x98, 0x59, 0x0f, 0x25, 0xee, 0x85,
+ 0x37, 0x41, 0x81, 0x46, 0x2c, 0xe7, 0xd4, 0x0f, 0xa5, 0x7f, 0x9e, 0x4c, 0x54, 0x55, 0x25, 0xfd,
+ 0xa8, 0x51, 0x9c, 0x4c, 0x47, 0x24, 0x54, 0xee, 0x1b, 0x35, 0x96, 0x85, 0xdc, 0x3e, 0xde, 0xda,
+ 0x09, 0x82, 0x5d, 0xb9, 0xfa, 0x27, 0x08, 0xb9, 0x57, 0x85, 0xa2, 0x14, 0x53, 0x84, 0x9c, 0x24,
+ 0xa3, 0x04, 0xa8, 0xf4, 0xdf, 0x5c, 0x76, 0x62, 0xda, 0xa2, 0xbf, 0x05, 0x0a, 0x6c, 0x0b, 0xd9,
+ 0x56, 0x64, 0xc9, 0x4d, 0xf0, 0xcc, 0xa3, 0x6d, 0x38, 0xb1, 0x5f, 0xd7, 0x70, 0x64, 0x55, 0xa0,
+ 0x74, 0x05, 0x48, 0x69, 0x48, 0x69, 0x85, 0x07, 0xa0, 0x9f, 0x86, 0xb8, 0x26, 0xe7, 0x7b, 0xef,
+ 0x04, 0xd1, 0xde, 0x65, 0x0e, 0xd5, 0x10, 0xd7, 0xd2, 0x60, 0x64, 0xff, 0x10, 0x47, 0x84, 0xef,
+ 0x18, 0x60, 0x80, 0xf2, 0xbc, 0x20, 0x73, 0xc9, 0xe6, 0x29, 0x80, 0x67, 0xf2, 0x8e, 0xf8, 0x8f,
+ 0x24, 0x6e, 0xe9, 0x5f, 0x39, 0x70, 0xa5, 0xdb, 0xd0, 0xc5, 0xc0, 0xb7, 0xc5, 0x22, 0xac, 0xc8,
+ 0x7d, 0x25, 0x22, 0xeb, 0x79, 0x7d, 0x5f, 0x1d, 0x35, 0x8a, 0x8f, 0x3f, 0x54, 0x81, 0xb6, 0x01,
+ 0xbf, 0xae, 0xa6, 0x2c, 0x36, 0xe9, 0x95, 0x56, 0xc3, 0x8e, 0x1a, 0xc5, 0x51, 0x35, 0xac, 0xd5,
+ 0x56, 0xb8, 0x07, 0xa0, 0x6b, 0xd1, 0xe8, 0x0e, 0xb1, 0x7c, 0x2a, 0xd4, 0x3a, 0x1e, 0x96, 0x9e,
+ 0x7b, 0xf2, 0xd1, 0x82, 0x82, 0x8d, 0xa8, 0x4c, 0x49, 0x48, 0xb8, 0xda, 0xa6, 0x0d, 0x75, 0x40,
+ 0x60, 0x39, 0x83, 0x60, 0x8b, 0xaa, 0x34, 0xa0, 0xe5, 0x70, 0x46, 0x45, 0x92, 0x0b, 0x9f, 0x00,
+ 0x83, 0x1e, 0xa6, 0xd4, 0xaa, 0x63, 0xbe, 0xf7, 0x87, 0xd2, 0x43, 0x71, 0x4d, 0x90, 0x51, 0xc2,
+ 0x2f, 0xfd, 0xdb, 0x00, 0x97, 0xba, 0x79, 0x6d, 0xd5, 0xa1, 0x11, 0xfc, 0x4e, 0x5b, 0xd8, 0x97,
+ 0x1f, 0x6d, 0x86, 0x6c, 0x34, 0x0f, 0x7a, 0x95, 0x4a, 0x12, 0x8a, 0x16, 0xf2, 0xfb, 0x20, 0xef,
+ 0x44, 0xd8, 0x4b, 0x4e, 0x4b, 0xd4, 0xfb, 0xb0, 0xab, 0x9c, 0x95, 0xf0, 0xf9, 0x15, 0x06, 0x84,
+ 0x04, 0x5e, 0xe9, 0xa3, 0x1c, 0xb8, 0xdc, 0x6d, 0x08, 0xcb, 0xe3, 0x94, 0x39, 0x3b, 0x74, 0x63,
+ 0x62, 0xb9, 0x32, 0xd8, 0x94, 0xb3, 0x37, 0x38, 0x15, 0x49, 0x2e, 0xcb, 0x9d, 0xd4, 0xf1, 0xeb,
+ 0xb1, 0x6b, 0x11, 0x19, 0x49, 0x6a, 0xc2, 0x55, 0x49, 0x47, 0x4a, 0x02, 0x96, 0x01, 0xa0, 0x3b,
+ 0x01, 0x89, 0x38, 0x06, 0xaf, 0x70, 0x86, 0x2a, 0xe7, 0x58, 0x46, 0xa8, 0x2a, 0x2a, 0xd2, 0x24,
+ 0xd8, 0x41, 0xb2, 0xeb, 0xf8, 0xb6, 0x5c, 0x70, 0xb5, 0x77, 0x6f, 0x39, 0xbe, 0x8d, 0x38, 0x87,
+ 0xe1, 0xbb, 0x0e, 0x8d, 0x18, 0x45, 0xae, 0x76, 0x8b, 0xc3, 0xb9, 0xa4, 0x92, 0x60, 0xf8, 0x35,
+ 0x96, 0x60, 0x03, 0xe2, 0x60, 0x6a, 0x0e, 0xa4, 0xf8, 0x8b, 0x8a, 0x8a, 0x34, 0x89, 0xd2, 0x5f,
+ 0xfb, 0xbb, 0xc7, 0x07, 0x4b, 0x20, 0xf0, 0x2a, 0xc8, 0xd7, 0x49, 0x10, 0x87, 0xd2, 0x4b, 0xca,
+ 0xdb, 0x2f, 0x31, 0x22, 0x12, 0x3c, 0xf8, 0x3d, 0x90, 0xf7, 0xe5, 0x84, 0x59, 0x04, 0xbd, 0xda,
+ 0xfb, 0x65, 0xe6, 0xde, 0x4a, 0xd1, 0x85, 0x23, 0x05, 0x28, 0x7c, 0x0e, 0xe4, 0x69, 0x2d, 0x08,
+ 0xb1, 0x74, 0xe2, 0x74, 0x22, 0x54, 0x65, 0xc4, 0xa3, 0x46, 0xf1, 0x6c, 0xa2, 0x8e, 0x13, 0x90,
+ 0x10, 0x86, 0x3f, 0x32, 0x40, 0x41, 0x1e, 0x17, 0xd4, 0x1c, 0xe4, 0xe1, 0xf9, 0x5a, 0xef, 0xed,
+ 0x96, 0x65, 0x6f, 0xba, 0x66, 0x92, 0x40, 0x91, 0x02, 0x87, 0x3f, 0x30, 0x00, 0xa8, 0xa9, 0xb3,
+ 0xcb, 0x1c, 0xe2, 0x3e, 0xec, 0xd9, 0x56, 0xd1, 0x4e, 0x45, 0x11, 0x08, 0x69, 0xa9, 0xa4, 0xa1,
+ 0xc2, 0x2a, 0x98, 0x08, 0x09, 0xe6, 0xba, 0xef, 0xfa, 0xbb, 0x7e, 0xb0, 0xef, 0xdf, 0x74, 0xb0,
+ 0x6b, 0x53, 0x13, 0xcc, 0x18, 0xb3, 0x85, 0xca, 0x65, 0x69, 0xff, 0xc4, 0x46, 0x27, 0x21, 0xd4,
+ 0x79, 0x6c, 0xe9, 0xdd, 0xbe, 0x6c, 0xad, 0x95, 0x3d, 0x2f, 0xe0, 0xfb, 0x62, 0xf2, 0x22, 0x0f,
+ 0x53, 0xd3, 0xe0, 0x0b, 0xf1, 0x46, 0xef, 0x17, 0x42, 0xe5, 0xfa, 0xf4, 0x90, 0x56, 0x24, 0x8a,
+ 0x34, 0x13, 0xe0, 0x07, 0x06, 0x38, 0x6b, 0xd5, 0x6a, 0x38, 0x8c, 0xb0, 0x2d, 0xb6, 0x71, 0xee,
+ 0x74, 0xa3, 0x7a, 0x42, 0x1a, 0x74, 0x76, 0x41, 0x47, 0x45, 0xad, 0x46, 0xc0, 0x17, 0xc1, 0x39,
+ 0x1a, 0x05, 0x04, 0xdb, 0x49, 0x04, 0xc9, 0xec, 0x02, 0x9b, 0x8d, 0xe2, 0xb9, 0x6a, 0x0b, 0x07,
+ 0x65, 0x24, 0x4b, 0x9f, 0xe4, 0x41, 0xf1, 0x21, 0x11, 0xfa, 0x08, 0x45, 0xef, 0x35, 0x30, 0xc0,
+ 0x67, 0x6a, 0x73, 0x87, 0x14, 0xb4, 0xa3, 0x9e, 0x53, 0x91, 0xe4, 0xb2, 0xe3, 0x89, 0xe1, 0xb3,
+ 0xe3, 0xa9, 0x8f, 0x0b, 0xaa, 0xe3, 0xa9, 0x2a, 0xc8, 0x28, 0xe1, 0xc3, 0x79, 0x00, 0x6c, 0x1c,
+ 0x12, 0xcc, 0x32, 0x92, 0x6d, 0x0e, 0x72, 0x69, 0xb5, 0x3e, 0x4b, 0x8a, 0x83, 0x34, 0x29, 0x78,
+ 0x13, 0xc0, 0xe4, 0x9f, 0x13, 0xf8, 0xaf, 0x5a, 0xc4, 0x77, 0xfc, 0xba, 0x59, 0xe0, 0x66, 0x4f,
+ 0xb2, 0xd3, 0x76, 0xa9, 0x8d, 0x8b, 0x3a, 0x8c, 0x80, 0x7b, 0x60, 0x40, 0x5c, 0xa3, 0x79, 0xde,
+ 0xe8, 0xe1, 0x8e, 0xbb, 0x67, 0xb9, 0x8e, 0xcd, 0xa1, 0x2a, 0x80, 0xbb, 0x87, 0xa3, 0x20, 0x89,
+ 0x06, 0xdf, 0x33, 0xc0, 0x08, 0x8d, 0xb7, 0x88, 0x94, 0xa6, 0x3c, 0xab, 0x0f, 0xcf, 0xdf, 0xe9,
+ 0x15, 0x7c, 0x55, 0xd3, 0x5d, 0x19, 0x6b, 0x36, 0x8a, 0x23, 0x3a, 0x05, 0xb5, 0x60, 0xc3, 0x3f,
+ 0x1a, 0xc0, 0xb4, 0x6c, 0x11, 0xfa, 0x96, 0xbb, 0x41, 0x1c, 0x3f, 0xc2, 0x44, 0x5c, 0x88, 0xc4,
+ 0xf1, 0xd1, 0xc3, 0x5a, 0x31, 0x7b, 0xcf, 0xaa, 0xcc, 0xc8, 0x95, 0x36, 0x17, 0xba, 0x58, 0x80,
+ 0xba, 0xda, 0x56, 0xfa, 0x8f, 0x91, 0x4d, 0x2d, 0xda, 0x2c, 0xab, 0x35, 0xcb, 0xc5, 0x70, 0x09,
+ 0x8c, 0xb1, 0xea, 0x17, 0xe1, 0xd0, 0x75, 0x6a, 0x16, 0xe5, 0xb7, 0x1f, 0x11, 0xdd, 0xea, 0x1a,
+ 0x5e, 0xcd, 0xf0, 0x51, 0xdb, 0x08, 0xf8, 0x0a, 0x80, 0xa2, 0x2c, 0x6c, 0xd1, 0x23, 0x2a, 0x01,
+ 0x55, 0xe0, 0x55, 0xdb, 0x24, 0x50, 0x87, 0x51, 0x70, 0x11, 0x8c, 0xbb, 0xd6, 0x16, 0x76, 0xab,
+ 0xd8, 0xc5, 0xb5, 0x28, 0x20, 0x5c, 0x95, 0xb8, 0x1f, 0x4e, 0x34, 0x1b, 0xc5, 0xf1, 0xd5, 0x2c,
+ 0x13, 0xb5, 0xcb, 0x97, 0xae, 0x64, 0xf7, 0xb2, 0x3e, 0x71, 0x51, 0x6c, 0x7f, 0x98, 0x03, 0x53,
+ 0xdd, 0x83, 0x02, 0x7e, 0x5f, 0x95, 0xc6, 0xa2, 0xe2, 0x7b, 0xed, 0x14, 0x42, 0x4f, 0x5e, 0x07,
+ 0x40, 0xfb, 0x55, 0x00, 0x1e, 0xb2, 0xf3, 0xda, 0x72, 0x93, 0x6b, 0xff, 0xe6, 0x69, 0xa0, 0x33,
+ 0xfd, 0x95, 0x21, 0x51, 0x05, 0x58, 0x2e, 0x3f, 0xf4, 0x2d, 0x17, 0x97, 0x3e, 0x6a, 0xbb, 0xda,
+ 0xa6, 0x9b, 0x15, 0xfe, 0xd8, 0x00, 0xa3, 0x41, 0x88, 0xfd, 0x85, 0x8d, 0x95, 0x7b, 0x5f, 0x15,
+ 0x9b, 0x56, 0x3a, 0x68, 0xe5, 0xf8, 0x26, 0xb2, 0xfb, 0xb5, 0xd0, 0xb5, 0x41, 0x82, 0x90, 0x56,
+ 0xce, 0x37, 0x1b, 0xc5, 0xd1, 0xf5, 0x56, 0x14, 0x94, 0x85, 0x2d, 0x79, 0x60, 0x62, 0xf9, 0x20,
+ 0xc2, 0xc4, 0xb7, 0xdc, 0xa5, 0xa0, 0x16, 0x7b, 0xd8, 0x8f, 0x84, 0x8d, 0x99, 0x76, 0x81, 0xf1,
+ 0x88, 0xed, 0x82, 0xcb, 0xa0, 0x2f, 0x26, 0xae, 0x8c, 0xda, 0x61, 0xd5, 0x04, 0x43, 0xab, 0x88,
+ 0xd1, 0x4b, 0x57, 0x40, 0x3f, 0xb3, 0x13, 0x5e, 0x04, 0x7d, 0xc4, 0xda, 0xe7, 0x5a, 0x47, 0x2a,
+ 0x83, 0x4c, 0x04, 0x59, 0xfb, 0x88, 0xd1, 0x4a, 0xff, 0x98, 0x01, 0xa3, 0x99, 0xb9, 0xc0, 0x29,
+ 0x90, 0x53, 0x9d, 0x35, 0x20, 0x95, 0xe6, 0x56, 0x96, 0x50, 0xce, 0xb1, 0xe1, 0x0b, 0x2a, 0xbb,
+ 0x0a, 0xd0, 0xa2, 0x3a, 0x2c, 0x38, 0x95, 0x95, 0x65, 0xa9, 0x3a, 0x66, 0x48, 0x92, 0x1e, 0x99,
+ 0x0d, 0x78, 0x5b, 0xee, 0x0a, 0x61, 0x03, 0xde, 0x46, 0x8c, 0x76, 0xdc, 0x5e, 0x49, 0xd2, 0xac,
+ 0xc9, 0x3f, 0x42, 0xb3, 0x66, 0xe0, 0x81, 0xcd, 0x9a, 0xab, 0x20, 0x1f, 0x39, 0x91, 0x8b, 0xf9,
+ 0x49, 0xa5, 0x15, 0xc3, 0x77, 0x18, 0x11, 0x09, 0x1e, 0xc4, 0x60, 0xd0, 0xc6, 0xdb, 0x56, 0xec,
+ 0x46, 0xfc, 0x50, 0x1a, 0x9e, 0xff, 0xd6, 0xc9, 0xa2, 0x47, 0x34, 0x33, 0x96, 0x84, 0x4a, 0x94,
+ 0xe8, 0x86, 0x8f, 0x83, 0x41, 0xcf, 0x3a, 0x70, 0xbc, 0xd8, 0xe3, 0x15, 0xa3, 0x21, 0xc4, 0xd6,
+ 0x04, 0x09, 0x25, 0x3c, 0x96, 0x04, 0xf1, 0x41, 0xcd, 0x8d, 0xa9, 0xb3, 0x87, 0x25, 0x53, 0x96,
+ 0x74, 0x2a, 0x09, 0x2e, 0x67, 0xf8, 0xa8, 0x6d, 0x04, 0x07, 0x73, 0x7c, 0x3e, 0x78, 0x58, 0x03,
+ 0x13, 0x24, 0x94, 0xf0, 0x5a, 0xc1, 0xa4, 0xfc, 0x48, 0x37, 0x30, 0x39, 0xb8, 0x6d, 0x04, 0x7c,
+ 0x0a, 0x0c, 0x79, 0xd6, 0xc1, 0x2a, 0xf6, 0xeb, 0xd1, 0x8e, 0x79, 0x76, 0xc6, 0x98, 0xed, 0xab,
+ 0x9c, 0x6d, 0x36, 0x8a, 0x43, 0x6b, 0x09, 0x11, 0xa5, 0x7c, 0x2e, 0xec, 0xf8, 0x52, 0xf8, 0x9c,
+ 0x26, 0x9c, 0x10, 0x51, 0xca, 0x67, 0x95, 0x49, 0x68, 0x45, 0x6c, 0x5f, 0x99, 0xa3, 0xad, 0x17,
+ 0xe7, 0x0d, 0x41, 0x46, 0x09, 0x1f, 0xce, 0x82, 0x82, 0x67, 0x1d, 0xf0, 0x3b, 0xa5, 0x39, 0xc6,
+ 0xd5, 0xf2, 0x86, 0xe2, 0x9a, 0xa4, 0x21, 0xc5, 0xe5, 0x92, 0x8e, 0x2f, 0x24, 0xc7, 0x35, 0x49,
+ 0x49, 0x43, 0x8a, 0xcb, 0xe2, 0x37, 0xf6, 0x9d, 0xfb, 0x31, 0x16, 0xc2, 0x90, 0x7b, 0x46, 0xc5,
+ 0xef, 0xdd, 0x94, 0x85, 0x74, 0x39, 0x76, 0xa7, 0xf3, 0x62, 0x37, 0x72, 0x42, 0x17, 0xaf, 0x6f,
+ 0x9b, 0xe7, 0xb9, 0xff, 0x79, 0x29, 0xbf, 0xa6, 0xa8, 0x48, 0x93, 0x80, 0x6f, 0x81, 0x7e, 0xec,
+ 0xc7, 0x9e, 0x79, 0x81, 0x1f, 0xdf, 0x27, 0x8d, 0x3e, 0xb5, 0x5f, 0x96, 0xfd, 0xd8, 0x43, 0x5c,
+ 0x33, 0x7c, 0x01, 0x9c, 0xf5, 0xac, 0x03, 0x96, 0x04, 0x30, 0x89, 0xd8, 0x45, 0x73, 0x82, 0xcf,
+ 0x7b, 0x9c, 0x15, 0xb1, 0x6b, 0x3a, 0x03, 0xb5, 0xca, 0xf1, 0x81, 0x8e, 0xaf, 0x0d, 0x9c, 0xd4,
+ 0x06, 0xea, 0x0c, 0xd4, 0x2a, 0xc7, 0x9c, 0x4c, 0xf0, 0xfd, 0xd8, 0x21, 0xd8, 0x36, 0xbf, 0xc4,
+ 0xeb, 0x5e, 0xd9, 0xdf, 0x15, 0x34, 0xa4, 0xb8, 0xf0, 0x7e, 0xd2, 0x72, 0x30, 0xf9, 0xe6, 0xdb,
+ 0xe8, 0x59, 0xea, 0x5e, 0x27, 0x0b, 0x84, 0x58, 0x87, 0xe2, 0x54, 0xd1, 0x9b, 0x0d, 0xd0, 0x07,
+ 0x79, 0xcb, 0x75, 0xd7, 0xb7, 0xcd, 0x8b, 0xdc, 0xe3, 0x3d, 0x3c, 0x2d, 0x54, 0x86, 0x59, 0x60,
+ 0xfa, 0x91, 0x80, 0x61, 0x78, 0x81, 0xcf, 0x62, 0x61, 0xea, 0xd4, 0xf0, 0xd6, 0x99, 0x7e, 0x24,
+ 0x60, 0xf8, 0xfc, 0xfc, 0xc3, 0xf5, 0x6d, 0xf3, 0xb1, 0xd3, 0x9b, 0x1f, 0xd3, 0x8f, 0x04, 0x0c,
+ 0xb4, 0x41, 0x9f, 0x1f, 0x44, 0xe6, 0xa5, 0x5e, 0x9f, 0xbd, 0xfc, 0x34, 0xb9, 0x1d, 0x44, 0x88,
+ 0xa9, 0x87, 0x3f, 0x35, 0x00, 0x08, 0xd3, 0x48, 0xbc, 0x7c, 0xd2, 0x16, 0x40, 0x06, 0xad, 0x9c,
+ 0x46, 0xef, 0xb2, 0x1f, 0x91, 0xc3, 0xf4, 0x5e, 0xa3, 0x45, 0xb9, 0x66, 0x00, 0xfc, 0xa5, 0x01,
+ 0x2e, 0xe8, 0xe5, 0xae, 0xb2, 0x6c, 0x9a, 0xfb, 0x61, 0xbd, 0x87, 0x81, 0x5c, 0x09, 0x02, 0xb7,
+ 0x62, 0x36, 0x1b, 0xc5, 0x0b, 0x0b, 0x1d, 0x00, 0x51, 0x47, 0x33, 0xe0, 0x6f, 0x0d, 0x30, 0x2e,
+ 0xb3, 0xa3, 0x66, 0x5c, 0x91, 0xbb, 0xed, 0xad, 0x1e, 0xba, 0x2d, 0x0b, 0x21, 0xbc, 0xa7, 0xbe,
+ 0x32, 0xb6, 0xf1, 0x51, 0xbb, 0x55, 0xf0, 0x0f, 0x06, 0x18, 0xb1, 0x71, 0x88, 0x7d, 0x1b, 0xfb,
+ 0x35, 0x66, 0xe6, 0xcc, 0x49, 0xfb, 0x0a, 0x59, 0x33, 0x97, 0x34, 0xed, 0xc2, 0xc2, 0xb2, 0xb4,
+ 0x70, 0x44, 0x67, 0x1d, 0x35, 0x8a, 0x93, 0xe9, 0x50, 0x9d, 0x83, 0x5a, 0x0c, 0x84, 0x3f, 0x33,
+ 0xc0, 0x68, 0xea, 0x76, 0x71, 0x40, 0x5c, 0x39, 0x9d, 0x85, 0xe7, 0x25, 0xe8, 0x42, 0x2b, 0x16,
+ 0xca, 0x82, 0xc3, 0xdf, 0x19, 0xac, 0xda, 0x4a, 0xee, 0x6a, 0xd4, 0x2c, 0x71, 0x0f, 0xbe, 0xde,
+ 0x4b, 0x0f, 0x2a, 0xe5, 0xc2, 0x81, 0xd7, 0xd3, 0x4a, 0x4e, 0x71, 0x8e, 0x1a, 0xc5, 0x09, 0xdd,
+ 0x7f, 0x8a, 0x81, 0x74, 0xe3, 0xe0, 0xbb, 0x06, 0x18, 0xc1, 0x69, 0xc1, 0x4c, 0xcd, 0xab, 0x27,
+ 0x75, 0x5d, 0xc7, 0xf2, 0x5b, 0x5c, 0xa7, 0x35, 0x16, 0x45, 0x2d, 0xb0, 0xac, 0xf6, 0xc3, 0x07,
+ 0x96, 0x17, 0xba, 0xd8, 0xfc, 0x72, 0xef, 0x6a, 0xbf, 0x65, 0xa1, 0x12, 0x25, 0xba, 0xe1, 0x75,
+ 0x50, 0xf0, 0x63, 0xd7, 0xb5, 0xb6, 0x5c, 0x6c, 0x3e, 0xce, 0xab, 0x08, 0xd5, 0x5f, 0xbc, 0x2d,
+ 0xe9, 0x48, 0x49, 0xc0, 0x6d, 0x30, 0x73, 0x70, 0x4b, 0x3d, 0xbe, 0xe8, 0xd8, 0xc0, 0x33, 0xaf,
+ 0x71, 0x2d, 0x53, 0xcd, 0x46, 0x71, 0x72, 0xb3, 0x73, 0x8b, 0xef, 0xa1, 0x3a, 0xe0, 0x1b, 0xe0,
+ 0x31, 0x4d, 0x66, 0xd9, 0xdb, 0xc2, 0xb6, 0x8d, 0xed, 0xe4, 0xa2, 0x65, 0x7e, 0x85, 0x43, 0xa8,
+ 0x7d, 0xbc, 0x99, 0x15, 0x40, 0x0f, 0x1a, 0x0d, 0x57, 0xc1, 0xa4, 0xc6, 0x5e, 0xf1, 0xa3, 0x75,
+ 0x52, 0x8d, 0x88, 0xe3, 0xd7, 0xcd, 0x59, 0xae, 0xf7, 0x42, 0xb2, 0xfb, 0x36, 0x35, 0x1e, 0xea,
+ 0x32, 0x06, 0xbe, 0xdc, 0xa2, 0x8d, 0x7f, 0xb8, 0xb0, 0xc2, 0x5b, 0xf8, 0x90, 0x9a, 0x4f, 0xf0,
+ 0xe2, 0x82, 0xaf, 0xf3, 0xa6, 0x46, 0x47, 0x5d, 0xe4, 0xe1, 0xb7, 0xc1, 0xf9, 0x0c, 0x87, 0xdd,
+ 0x2b, 0xcc, 0x27, 0xc5, 0x05, 0x81, 0x55, 0xa2, 0x9b, 0x09, 0x11, 0x75, 0x92, 0x84, 0xdf, 0x04,
+ 0x50, 0x23, 0xaf, 0x59, 0x21, 0x1f, 0xff, 0x94, 0xb8, 0xab, 0xb0, 0x15, 0xdd, 0x94, 0x34, 0xd4,
+ 0x41, 0x0e, 0x7e, 0x68, 0xb4, 0xcc, 0x24, 0xbd, 0xcd, 0x52, 0xf3, 0x3a, 0xdf, 0xb0, 0x2f, 0x1f,
+ 0x3f, 0x00, 0x53, 0x65, 0x28, 0x76, 0xb1, 0xe6, 0x61, 0x0d, 0x05, 0x75, 0x41, 0x9f, 0x62, 0x97,
+ 0xe9, 0x4c, 0x0e, 0x87, 0x63, 0xa0, 0x6f, 0x17, 0xcb, 0xcf, 0xc6, 0x88, 0xfd, 0x84, 0x6f, 0x82,
+ 0xfc, 0x9e, 0xe5, 0xc6, 0x49, 0x2b, 0xa0, 0x77, 0x67, 0x3d, 0x12, 0x7a, 0x5f, 0xcc, 0xdd, 0x30,
+ 0xa6, 0xde, 0x37, 0xc0, 0x64, 0xe7, 0x53, 0xe5, 0x8b, 0xb2, 0xe8, 0x17, 0x06, 0x18, 0x6f, 0x3b,
+ 0x40, 0x3a, 0x18, 0xe3, 0xb6, 0x1a, 0x73, 0xaf, 0x87, 0x27, 0x81, 0xd8, 0x08, 0xbc, 0xa2, 0xd5,
+ 0x2d, 0xfb, 0x89, 0x01, 0xc6, 0xb2, 0x89, 0xf9, 0x0b, 0xf2, 0x52, 0xe9, 0xbd, 0x1c, 0x98, 0xec,
+ 0x5c, 0x83, 0x43, 0x4f, 0x75, 0x17, 0x7a, 0xde, 0xa0, 0xe9, 0xd4, 0xb2, 0x7d, 0xc7, 0x00, 0xc3,
+ 0x6f, 0x2b, 0xb9, 0xe4, 0x6b, 0x66, 0x2f, 0xbb, 0x42, 0xc9, 0xd1, 0x97, 0x32, 0x28, 0xd2, 0x21,
+ 0x4b, 0xbf, 0x37, 0xc0, 0x44, 0xc7, 0xe3, 0x1c, 0x5e, 0x03, 0x03, 0x96, 0xeb, 0x06, 0xfb, 0xa2,
+ 0x9b, 0xa7, 0xb5, 0xe5, 0x17, 0x38, 0x15, 0x49, 0xae, 0xe6, 0xb3, 0xdc, 0xe7, 0xe0, 0xb3, 0xd2,
+ 0x9f, 0x0c, 0x70, 0xe9, 0x41, 0x51, 0xf7, 0x79, 0xaf, 0xe1, 0x2c, 0x28, 0xc8, 0x62, 0xfb, 0x90,
+ 0xaf, 0x9f, 0xcc, 0xae, 0x32, 0x23, 0xf0, 0xd7, 0x32, 0xe2, 0x57, 0xe9, 0xd7, 0x06, 0x18, 0xab,
+ 0x62, 0xb2, 0xe7, 0xd4, 0x30, 0xc2, 0xdb, 0x98, 0x60, 0xbf, 0x86, 0xe1, 0x1c, 0x18, 0xe2, 0x5f,
+ 0x1b, 0x43, 0xab, 0x96, 0x7c, 0x23, 0x19, 0x97, 0x8e, 0x1e, 0xba, 0x9d, 0x30, 0x50, 0x2a, 0xa3,
+ 0xbe, 0xa7, 0xe4, 0xba, 0x7e, 0x4f, 0xb9, 0x04, 0xfa, 0xc3, 0xb4, 0x01, 0x5c, 0x60, 0x5c, 0xde,
+ 0xf3, 0xe5, 0x54, 0xce, 0x0d, 0x48, 0xc4, 0xbb, 0x5c, 0x79, 0xc9, 0x0d, 0x48, 0x84, 0x38, 0xb5,
+ 0xf4, 0x41, 0x0e, 0x9c, 0x6b, 0xcd, 0xcf, 0x0c, 0x90, 0xc4, 0x6e, 0xdb, 0x07, 0x1c, 0xc6, 0x43,
+ 0x9c, 0xa3, 0xbf, 0x1b, 0xc8, 0x3d, 0xf8, 0xdd, 0x00, 0x7c, 0x09, 0x8c, 0xcb, 0x9f, 0xcb, 0x07,
+ 0x21, 0xc1, 0x94, 0x7f, 0x99, 0xec, 0x6b, 0x7d, 0xef, 0xb7, 0x96, 0x15, 0x40, 0xed, 0x63, 0xe0,
+ 0x37, 0x32, 0x6f, 0x1a, 0xae, 0xa6, 0xef, 0x19, 0x58, 0x6d, 0xc7, 0x4b, 0x87, 0x7b, 0x6c, 0xcb,
+ 0x2f, 0x13, 0x12, 0x90, 0xcc, 0x43, 0x87, 0x39, 0x30, 0xb4, 0xcd, 0x04, 0x78, 0x9f, 0x3c, 0xdf,
+ 0xea, 0xf4, 0x9b, 0x09, 0x03, 0xa5, 0x32, 0xa5, 0x3f, 0x1b, 0xe0, 0x7c, 0xf2, 0x1a, 0xc8, 0x75,
+ 0xb0, 0x1f, 0x2d, 0x06, 0xfe, 0xb6, 0x53, 0x87, 0x17, 0x45, 0xff, 0x53, 0x6b, 0x2a, 0x26, 0xbd,
+ 0x4f, 0x78, 0x1f, 0x0c, 0x52, 0xb1, 0xd8, 0x32, 0x0e, 0x5f, 0x39, 0x7e, 0x1c, 0x66, 0xa3, 0x46,
+ 0x94, 0x6f, 0x09, 0x35, 0xc1, 0x61, 0xa1, 0x58, 0xb3, 0x2a, 0xb1, 0x6f, 0xcb, 0x1e, 0xf8, 0x88,
+ 0x08, 0xc5, 0xc5, 0x05, 0x41, 0x43, 0x8a, 0x5b, 0xfa, 0xa7, 0x01, 0xc6, 0xdb, 0x5e, 0x37, 0xc1,
+ 0x1f, 0x1a, 0x60, 0xa4, 0xa6, 0x4d, 0x4f, 0x6e, 0xe8, 0xb5, 0x93, 0xbf, 0xa0, 0xd2, 0x94, 0x8a,
+ 0x1a, 0x48, 0xa7, 0xa0, 0x16, 0x50, 0xb8, 0x09, 0xcc, 0x5a, 0xe6, 0x21, 0x61, 0xe6, 0xd3, 0xe4,
+ 0xa5, 0x66, 0xa3, 0x68, 0x2e, 0x76, 0x91, 0x41, 0x5d, 0x47, 0x57, 0xbe, 0xfb, 0xf1, 0x67, 0xd3,
+ 0x67, 0x3e, 0xf9, 0x6c, 0xfa, 0xcc, 0xa7, 0x9f, 0x4d, 0x9f, 0x79, 0xa7, 0x39, 0x6d, 0x7c, 0xdc,
+ 0x9c, 0x36, 0x3e, 0x69, 0x4e, 0x1b, 0x9f, 0x36, 0xa7, 0x8d, 0xbf, 0x35, 0xa7, 0x8d, 0x9f, 0xff,
+ 0x7d, 0xfa, 0xcc, 0xeb, 0x37, 0x8e, 0xfb, 0x7c, 0xf8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1d,
+ 0x01, 0xc1, 0x04, 0x92, 0x2c, 0x00, 0x00,
}
func (m *ConversionRequest) Marshal() (dAtA []byte, err error) {
@@ -2630,6 +2633,18 @@ func (m *ValidationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i -= len(m.FieldPath)
+ copy(dAtA[i:], m.FieldPath)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath)))
+ i--
+ dAtA[i] = 0x2a
+ if m.Reason != nil {
+ i -= len(*m.Reason)
+ copy(dAtA[i:], *m.Reason)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Reason)))
+ i--
+ dAtA[i] = 0x22
+ }
i -= len(m.MessageExpression)
copy(dAtA[i:], m.MessageExpression)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageExpression)))
@@ -3346,6 +3361,12 @@ func (m *ValidationRule) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.MessageExpression)
n += 1 + l + sovGenerated(uint64(l))
+ if m.Reason != nil {
+ l = len(*m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.FieldPath)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -3822,6 +3843,8 @@ func (this *ValidationRule) String() string {
`Rule:` + fmt.Sprintf("%v", this.Rule) + `,`,
`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
`MessageExpression:` + fmt.Sprintf("%v", this.MessageExpression) + `,`,
+ `Reason:` + valueToStringGenerated(this.Reason) + `,`,
+ `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`,
`}`,
}, "")
return s
@@ -8920,6 +8943,71 @@ func (m *ValidationRule) Unmarshal(dAtA []byte) error {
}
m.MessageExpression = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := FieldValueErrorReason(dAtA[iNdEx:postIndex])
+ m.Reason = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FieldPath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
index 4632a83e59..578d018a7b 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
@@ -678,6 +678,26 @@ message ValidationRule {
// "x must be less than max ("+string(self.max)+")"
// +optional
optional string messageExpression = 3;
+
+ // reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule.
+  // The HTTP status code returned to the caller will match the reason of the first failed validation rule.
+ // The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate".
+  // If not set, it defaults to "FieldValueInvalid".
+  // Clients reading this value must accept reasons that are added in the future, and unknown reasons should be treated as FieldValueInvalid.
+ // +optional
+ optional string reason = 4;
+
+ // fieldPath represents the field path returned when the validation fails.
+  // It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and must refer to an existing field.
+  // e.g. when a validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`.
+  // If the validation requires that two lists have unique attributes, the fieldPath could be set to either of the lists, e.g. `.testList`.
+  // Only child operations that refer to an existing field are currently supported; refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info.
+  // Numeric indexes of arrays are not supported.
+  // For a field name that contains special characters, use `['specialName']` to refer to the field name,
+  // e.g. for an attribute `foo.34$` that appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`.
+ // +optional
+ optional string fieldPath = 5;
}
// WebhookClientConfig contains the information to make a TLS connection with the webhook.
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go
index b348d0d19b..1c90d464a9 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types_jsonschema.go
@@ -16,6 +16,26 @@ limitations under the License.
package v1
+// FieldValueErrorReason is a machine-readable value providing more detail about why a field failed the validation.
+// +enum
+type FieldValueErrorReason string
+
+const (
+ // FieldValueRequired is used to report required values that are not
+ // provided (e.g. empty strings, null values, or empty arrays).
+ FieldValueRequired FieldValueErrorReason = "FieldValueRequired"
+ // FieldValueDuplicate is used to report collisions of values that must be
+ // unique (e.g. unique IDs).
+ FieldValueDuplicate FieldValueErrorReason = "FieldValueDuplicate"
+ // FieldValueInvalid is used to report malformed values (e.g. failed regex
+ // match, too long, out of bounds).
+ FieldValueInvalid FieldValueErrorReason = "FieldValueInvalid"
+ // FieldValueForbidden is used to report valid (as per formatting rules)
+ // values which would be accepted under some conditions, but which are not
+ // permitted by the current conditions (such as security policy).
+ FieldValueForbidden FieldValueErrorReason = "FieldValueForbidden"
+)
+
// JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).
type JSONSchemaProps struct {
ID string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"`
@@ -247,6 +267,24 @@ type ValidationRule struct {
// "x must be less than max ("+string(self.max)+")"
// +optional
MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,3,opt,name=messageExpression"`
+ // reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule.
+ // The HTTP status code returned to the caller will match the reason of the first failed validation rule.
+ // The currently supported reasons are: "FieldValueInvalid", "FieldValueForbidden", "FieldValueRequired", "FieldValueDuplicate".
+ // If not set, it defaults to "FieldValueInvalid".
+ // Clients reading this value must accept reasons that are added in the future, and unknown reasons should be treated as FieldValueInvalid.
+ // +optional
+ Reason *FieldValueErrorReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // fieldPath represents the field path returned when the validation fails.
+ // It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and must refer to an existing field.
+ // e.g. when a validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`.
+ // If the validation requires that two lists have unique attributes, the fieldPath could be set to either of the lists, e.g. `.testList`.
+ // Only child operations that refer to an existing field are currently supported; refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info.
+ // Numeric indexes of arrays are not supported.
+ // For a field name that contains special characters, use `['specialName']` to refer to the field name,
+ // e.g. for an attribute `foo.34$` that appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`.
+ // +optional
+ FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,5,opt,name=fieldPath"`
}
// JSON represents any valid JSON value.
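A minimal sketch of how the new Reason and FieldPath fields could be populated on a ValidationRule; the rule, message, and path values below are hypothetical and only illustrate the shape of the API added above, not part of this patch.

	reason := FieldValueForbidden
	rule := ValidationRule{
		Rule:              "self.replicas <= self.maxReplicas",
		Message:           "replicas must not exceed maxReplicas",
		MessageExpression: `"replicas must not exceed " + string(self.maxReplicas)`,
		Reason:            &reason,     // machine-readable failure reason returned to the caller
		FieldPath:         ".replicas", // relative JSON path reported when the rule fails
	}
	_ = rule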
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go
index cde5275cef..0a82e4d8c3 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.conversion.go
@@ -1259,6 +1259,8 @@ func autoConvert_v1_ValidationRule_To_apiextensions_ValidationRule(in *Validatio
out.Rule = in.Rule
out.Message = in.Message
out.MessageExpression = in.MessageExpression
+ out.Reason = (*apiextensions.FieldValueErrorReason)(unsafe.Pointer(in.Reason))
+ out.FieldPath = in.FieldPath
return nil
}
@@ -1271,6 +1273,8 @@ func autoConvert_apiextensions_ValidationRule_To_v1_ValidationRule(in *apiextens
out.Rule = in.Rule
out.Message = in.Message
out.MessageExpression = in.MessageExpression
+ out.Reason = (*FieldValueErrorReason)(unsafe.Pointer(in.Reason))
+ out.FieldPath = in.FieldPath
return nil
}
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go
index e27daa9a35..b4347b8db1 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.deepcopy.go
@@ -614,6 +614,11 @@ func (in *ServiceReference) DeepCopy() *ServiceReference {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidationRule) DeepCopyInto(out *ValidationRule) {
*out = *in
+ if in.Reason != nil {
+ in, out := &in.Reason, &out.Reason
+ *out = new(FieldValueErrorReason)
+ **out = **in
+ }
return
}
@@ -632,7 +637,9 @@ func (in ValidationRules) DeepCopyInto(out *ValidationRules) {
{
in := &in
*out = make(ValidationRules, len(*in))
- copy(*out, *in)
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
return
}
}
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go
index 998c9dbe27..f8a5ffbfbb 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go
@@ -531,6 +531,11 @@ func (in *ServiceReference) DeepCopy() *ServiceReference {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ValidationRule) DeepCopyInto(out *ValidationRule) {
*out = *in
+ if in.Reason != nil {
+ in, out := &in.Reason, &out.Reason
+ *out = new(FieldValueErrorReason)
+ **out = **in
+ }
return
}
@@ -549,7 +554,9 @@ func (in ValidationRules) DeepCopyInto(out *ValidationRules) {
{
in := &in
*out = make(ValidationRules, len(*in))
- copy(*out, *in)
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
return
}
}
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS b/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS
new file mode 100644
index 0000000000..3e1dd9f081
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - feature-approvers
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
new file mode 100644
index 0000000000..1844ed8d1e
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ "k8s.io/component-base/featuregate"
+)
+
+const (
+	// Every feature gate should add a method here following this template:
+ //
+ // // owner: @username
+ // // alpha: v1.4
+ // MyFeature() bool
+
+ // owner: @alexzielenski
+ // alpha: v1.28
+ //
+ // Ignores errors raised on unchanged fields of Custom Resources
+ // across UPDATE/PATCH requests.
+ CRDValidationRatcheting featuregate.Feature = "CRDValidationRatcheting"
+)
+
+func init() {
+ utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)
+}
+
+// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
+// To add a new feature, define a key for it above and add it here. The features will be
+// available throughout Kubernetes binaries.
+var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
+ CRDValidationRatcheting: {Default: false, PreRelease: featuregate.Alpha},
+}
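A minimal sketch (not part of this patch) of how a caller could consult the new gate, assuming the standard DefaultFeatureGate helper from k8s.io/apiserver:

	import (
		"k8s.io/apiextensions-apiserver/pkg/features"
		utilfeature "k8s.io/apiserver/pkg/util/feature"
	)

	// ratchetingEnabled reports whether the alpha CRDValidationRatcheting gate is on.
	func ratchetingEnabled() bool {
		return utilfeature.DefaultFeatureGate.Enabled(features.CRDValidationRatcheting)
	}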
diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go
similarity index 100%
rename from vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go
rename to vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go
diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go
similarity index 91%
rename from vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go
rename to vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go
index 694ce81d20..a1aa1688bd 100644
--- a/vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go
@@ -18,4 +18,4 @@ limitations under the License.
// The Conn type allows callers to multiplex multiple read/write channels over
// a single websocket. The Reader type allows an io.Reader to be copied over
// a websocket channel as binary content.
-package wsstream // import "k8s.io/apiserver/pkg/util/wsstream"
+package wsstream // import "k8s.io/apimachinery/pkg/util/httpstream/wsstream"
diff --git a/vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/stream.go
similarity index 100%
rename from vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go
rename to vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/stream.go
diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go
index daee678599..3ecc00b74c 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/mutating_webhook_manager.go
@@ -19,8 +19,9 @@ package configuration
import (
"fmt"
"sort"
+ "sync"
- "k8s.io/api/admissionregistration/v1"
+ v1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/admission/plugin/webhook"
@@ -29,13 +30,22 @@ import (
admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/cache/synctrack"
+ "k8s.io/klog/v2"
)
+// Type for test injection.
+type mutatingWebhookAccessorCreator func(uid string, configurationName string, h *v1.MutatingWebhook) webhook.WebhookAccessor
+
// mutatingWebhookConfigurationManager collects the mutating webhook objects so that they can be called.
type mutatingWebhookConfigurationManager struct {
- lister admissionregistrationlisters.MutatingWebhookConfigurationLister
- hasSynced func() bool
- lazy synctrack.Lazy[[]webhook.WebhookAccessor]
+ lister admissionregistrationlisters.MutatingWebhookConfigurationLister
+ hasSynced func() bool
+ lazy synctrack.Lazy[[]webhook.WebhookAccessor]
+ configurationsCache sync.Map
+ // createMutatingWebhookAccessor is used to instantiate webhook accessors.
+	// This function is defined as a field instead of a struct method to allow injection
+	// during tests.
+ createMutatingWebhookAccessor mutatingWebhookAccessorCreator
}
var _ generic.Source = &mutatingWebhookConfigurationManager{}
@@ -43,14 +53,35 @@ var _ generic.Source = &mutatingWebhookConfigurationManager{}
func NewMutatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source {
informer := f.Admissionregistration().V1().MutatingWebhookConfigurations()
manager := &mutatingWebhookConfigurationManager{
- lister: informer.Lister(),
+ lister: informer.Lister(),
+ createMutatingWebhookAccessor: webhook.NewMutatingWebhookAccessor,
}
manager.lazy.Evaluate = manager.getConfiguration
handle, _ := informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
- AddFunc: func(_ interface{}) { manager.lazy.Notify() },
- UpdateFunc: func(_, _ interface{}) { manager.lazy.Notify() },
- DeleteFunc: func(_ interface{}) { manager.lazy.Notify() },
+ AddFunc: func(_ interface{}) { manager.lazy.Notify() },
+ UpdateFunc: func(old, new interface{}) {
+ obj := new.(*v1.MutatingWebhookConfiguration)
+ manager.configurationsCache.Delete(obj.GetName())
+ manager.lazy.Notify()
+ },
+ DeleteFunc: func(obj interface{}) {
+ vwc, ok := obj.(*v1.MutatingWebhookConfiguration)
+ if !ok {
+ tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+ if !ok {
+ klog.V(2).Infof("Couldn't get object from tombstone %#v", obj)
+ return
+ }
+ vwc, ok = tombstone.Obj.(*v1.MutatingWebhookConfiguration)
+ if !ok {
+ klog.V(2).Infof("Tombstone contained object that is not expected %#v", obj)
+ return
+ }
+ }
+ manager.configurationsCache.Delete(vwc.Name)
+ manager.lazy.Notify()
+ },
})
manager.hasSynced = handle.HasSynced
@@ -75,25 +106,46 @@ func (m *mutatingWebhookConfigurationManager) getConfiguration() ([]webhook.Webh
if err != nil {
return []webhook.WebhookAccessor{}, err
}
- return mergeMutatingWebhookConfigurations(configurations), nil
+ return m.getMutatingWebhookConfigurations(configurations), nil
}
-func mergeMutatingWebhookConfigurations(configurations []*v1.MutatingWebhookConfiguration) []webhook.WebhookAccessor {
+// getMutatingWebhookConfigurations returns the webhook accessors for a given list of
+// mutating webhook configurations.
+//
+// This function will first try to load the webhook accessors from the cache and avoid
+// recreating them, which can be expensive (requiring CEL expression recompilation).
+func (m *mutatingWebhookConfigurationManager) getMutatingWebhookConfigurations(configurations []*v1.MutatingWebhookConfiguration) []webhook.WebhookAccessor {
// The internal order of webhooks for each configuration is provided by the user
// but configurations themselves can be in any order. As we are going to run these
// webhooks in serial, they are sorted here to have a deterministic order.
sort.SliceStable(configurations, MutatingWebhookConfigurationSorter(configurations).ByName)
- accessors := []webhook.WebhookAccessor{}
+ size := 0
+ for _, cfg := range configurations {
+ size += len(cfg.Webhooks)
+ }
+ accessors := make([]webhook.WebhookAccessor, 0, size)
+
for _, c := range configurations {
+ cachedConfigurationAccessors, ok := m.configurationsCache.Load(c.Name)
+ if ok {
+ // Pick an already cached webhookAccessor
+ accessors = append(accessors, cachedConfigurationAccessors.([]webhook.WebhookAccessor)...)
+ continue
+ }
+
// webhook names are not validated for uniqueness, so we check for duplicates and
// add a int suffix to distinguish between them
names := map[string]int{}
+ configurationAccessors := make([]webhook.WebhookAccessor, 0, len(c.Webhooks))
for i := range c.Webhooks {
n := c.Webhooks[i].Name
uid := fmt.Sprintf("%s/%s/%d", c.Name, n, names[n])
names[n]++
- accessors = append(accessors, webhook.NewMutatingWebhookAccessor(uid, c.Name, &c.Webhooks[i]))
+ configurationAccessor := m.createMutatingWebhookAccessor(uid, c.Name, &c.Webhooks[i])
+ configurationAccessors = append(configurationAccessors, configurationAccessor)
}
+ accessors = append(accessors, configurationAccessors...)
+ m.configurationsCache.Store(c.Name, configurationAccessors)
}
return accessors
}
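The caching added above follows a simple read-through pattern. A hypothetical standalone sketch of the same idea (names are illustrative and not taken from the patch):

	// cachedOrBuild returns the accessor slice for a configuration, rebuilding it
	// (and recompiling any CEL expressions) only when the cache has no entry.
	func cachedOrBuild(cache *sync.Map, name string, build func() []webhook.WebhookAccessor) []webhook.WebhookAccessor {
		if v, ok := cache.Load(name); ok {
			return v.([]webhook.WebhookAccessor) // cache hit: reuse existing accessors
		}
		accessors := build() // cache miss: build once, then store
		cache.Store(name, accessors)
		return accessors
	}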
diff --git a/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go b/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go
index f318b50129..b423321177 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/configuration/validating_webhook_manager.go
@@ -19,8 +19,9 @@ package configuration
import (
"fmt"
"sort"
+ "sync"
- "k8s.io/api/admissionregistration/v1"
+ v1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/admission/plugin/webhook"
@@ -29,13 +30,22 @@ import (
admissionregistrationlisters "k8s.io/client-go/listers/admissionregistration/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/cache/synctrack"
+ "k8s.io/klog/v2"
)
+// Type for test injection.
+type validatingWebhookAccessorCreator func(uid string, configurationName string, h *v1.ValidatingWebhook) webhook.WebhookAccessor
+
// validatingWebhookConfigurationManager collects the validating webhook objects so that they can be called.
type validatingWebhookConfigurationManager struct {
- lister admissionregistrationlisters.ValidatingWebhookConfigurationLister
- hasSynced func() bool
- lazy synctrack.Lazy[[]webhook.WebhookAccessor]
+ lister admissionregistrationlisters.ValidatingWebhookConfigurationLister
+ hasSynced func() bool
+ lazy synctrack.Lazy[[]webhook.WebhookAccessor]
+ configurationsCache sync.Map
+ // createValidatingWebhookAccessor is used to instantiate webhook accessors.
+	// This function is defined as a field instead of a struct method to allow injection
+	// during tests.
+ createValidatingWebhookAccessor validatingWebhookAccessorCreator
}
var _ generic.Source = &validatingWebhookConfigurationManager{}
@@ -43,14 +53,35 @@ var _ generic.Source = &validatingWebhookConfigurationManager{}
func NewValidatingWebhookConfigurationManager(f informers.SharedInformerFactory) generic.Source {
informer := f.Admissionregistration().V1().ValidatingWebhookConfigurations()
manager := &validatingWebhookConfigurationManager{
- lister: informer.Lister(),
+ lister: informer.Lister(),
+ createValidatingWebhookAccessor: webhook.NewValidatingWebhookAccessor,
}
manager.lazy.Evaluate = manager.getConfiguration
handle, _ := informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
- AddFunc: func(_ interface{}) { manager.lazy.Notify() },
- UpdateFunc: func(_, _ interface{}) { manager.lazy.Notify() },
- DeleteFunc: func(_ interface{}) { manager.lazy.Notify() },
+ AddFunc: func(_ interface{}) { manager.lazy.Notify() },
+ UpdateFunc: func(old, new interface{}) {
+ obj := new.(*v1.ValidatingWebhookConfiguration)
+ manager.configurationsCache.Delete(obj.GetName())
+ manager.lazy.Notify()
+ },
+ DeleteFunc: func(obj interface{}) {
+ vwc, ok := obj.(*v1.ValidatingWebhookConfiguration)
+ if !ok {
+ tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+ if !ok {
+ klog.V(2).Infof("Couldn't get object from tombstone %#v", obj)
+ return
+ }
+ vwc, ok = tombstone.Obj.(*v1.ValidatingWebhookConfiguration)
+ if !ok {
+ klog.V(2).Infof("Tombstone contained object that is not expected %#v", obj)
+ return
+ }
+ }
+ manager.configurationsCache.Delete(vwc.Name)
+ manager.lazy.Notify()
+ },
})
manager.hasSynced = handle.HasSynced
@@ -66,7 +97,7 @@ func (v *validatingWebhookConfigurationManager) Webhooks() []webhook.WebhookAcce
return out
}
-// HasSynced returns true if the initial set of mutating webhook configurations
+// HasSynced returns true if the initial set of validating webhook configurations
// has been loaded.
func (v *validatingWebhookConfigurationManager) HasSynced() bool { return v.hasSynced() }
@@ -75,23 +106,45 @@ func (v *validatingWebhookConfigurationManager) getConfiguration() ([]webhook.We
if err != nil {
return []webhook.WebhookAccessor{}, err
}
- return mergeValidatingWebhookConfigurations(configurations), nil
+ return v.getValidatingWebhookConfigurations(configurations), nil
}
-func mergeValidatingWebhookConfigurations(configurations []*v1.ValidatingWebhookConfiguration) []webhook.WebhookAccessor {
+// getValidatingWebhookConfigurations returns the webhook accessors for a given list of
+// validating webhook configurations.
+//
+// This function will first try to load the webhook accessors from the cache and avoid
+// recreating them, which can be expensive (requiring CEL expression recompilation).
+func (v *validatingWebhookConfigurationManager) getValidatingWebhookConfigurations(configurations []*v1.ValidatingWebhookConfiguration) []webhook.WebhookAccessor {
sort.SliceStable(configurations, ValidatingWebhookConfigurationSorter(configurations).ByName)
- accessors := []webhook.WebhookAccessor{}
+ size := 0
+ for _, cfg := range configurations {
+ size += len(cfg.Webhooks)
+ }
+ accessors := make([]webhook.WebhookAccessor, 0, size)
+
for _, c := range configurations {
+ cachedConfigurationAccessors, ok := v.configurationsCache.Load(c.Name)
+ if ok {
+ // Pick an already cached webhookAccessor
+ accessors = append(accessors, cachedConfigurationAccessors.([]webhook.WebhookAccessor)...)
+ continue
+ }
+
// webhook names are not validated for uniqueness, so we check for duplicates and
// add a int suffix to distinguish between them
names := map[string]int{}
+ configurationAccessors := make([]webhook.WebhookAccessor, 0, len(c.Webhooks))
for i := range c.Webhooks {
n := c.Webhooks[i].Name
uid := fmt.Sprintf("%s/%s/%d", c.Name, n, names[n])
names[n]++
- accessors = append(accessors, webhook.NewValidatingWebhookAccessor(uid, c.Name, &c.Webhooks[i]))
+ configurationAccessor := v.createValidatingWebhookAccessor(uid, c.Name, &c.Webhooks[i])
+ configurationAccessors = append(configurationAccessors, configurationAccessor)
}
+ accessors = append(accessors, configurationAccessors...)
+ v.configurationsCache.Store(c.Name, configurationAccessors)
}
+
return accessors
}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go
index 26b82c37e3..6c1761149f 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/metrics/metrics.go
@@ -54,6 +54,8 @@ var (
type ObserverFunc func(ctx context.Context, elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string)
const (
+ kindWebhook = "webhook"
+ kindPolicy = "policy"
stepValidate = "validate"
stepAdmit = "admit"
)
@@ -112,13 +114,15 @@ func (p pluginHandlerWithMetrics) Validate(ctx context.Context, a admission.Attr
// AdmissionMetrics instruments admission with prometheus metrics.
type AdmissionMetrics struct {
- step *metricSet
- controller *metricSet
- webhook *metricSet
- webhookRejection *metrics.CounterVec
- webhookFailOpen *metrics.CounterVec
- webhookRequest *metrics.CounterVec
- matchConditionEvalErrors *metrics.CounterVec
+ step *metricSet
+ controller *metricSet
+ webhook *metricSet
+ webhookRejection *metrics.CounterVec
+ webhookFailOpen *metrics.CounterVec
+ webhookRequest *metrics.CounterVec
+ matchConditionEvalErrors *metrics.CounterVec
+ matchConditionExclusions *metrics.CounterVec
+ matchConditionEvaluationSeconds *metricSet
}
// newAdmissionMetrics create a new AdmissionMetrics, configured with default metric names.
@@ -222,20 +226,47 @@ func newAdmissionMetrics() *AdmissionMetrics {
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
- Name: "admission_match_condition_evaluation_errors_total",
- Help: "Admission match condition evaluation errors count, identified by name of resource containing the match condition and broken out for each admission type (validating or mutating).",
+ Name: "match_condition_evaluation_errors_total",
+ Help: "Admission match condition evaluation errors count, identified by name of resource containing the match condition and broken out for each kind containing matchConditions (webhook or policy), operation and admission type (validate or admit).",
StabilityLevel: metrics.ALPHA,
},
- []string{"name", "type"})
+ []string{"name", "kind", "type", "operation"})
+
+ matchConditionExclusions := metrics.NewCounterVec(
+ &metrics.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "match_condition_exclusions_total",
+ Help: "Admission match condition evaluation exclusions count, identified by name of resource containing the match condition and broken out for each kind containing matchConditions (webhook or policy), operation and admission type (validate or admit).",
+ StabilityLevel: metrics.ALPHA,
+ },
+ []string{"name", "kind", "type", "operation"})
+
+ matchConditionEvaluationSeconds := &metricSet{
+ latencies: metrics.NewHistogramVec(
+ &metrics.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "match_condition_evaluation_seconds",
+ Help: "Admission match condition evaluation time in seconds, identified by name and broken out for each kind containing matchConditions (webhook or policy), operation and type (validate or admit).",
+ Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.1, 0.2, 0.25},
+ StabilityLevel: metrics.ALPHA,
+ },
+ []string{"name", "kind", "type", "operation"},
+ ),
+ latenciesSummary: nil,
+ }
step.mustRegister()
controller.mustRegister()
webhook.mustRegister()
+ matchConditionEvaluationSeconds.mustRegister()
legacyregistry.MustRegister(webhookRejection)
legacyregistry.MustRegister(webhookFailOpen)
legacyregistry.MustRegister(webhookRequest)
legacyregistry.MustRegister(matchConditionEvalError)
- return &AdmissionMetrics{step: step, controller: controller, webhook: webhook, webhookRejection: webhookRejection, webhookFailOpen: webhookFailOpen, webhookRequest: webhookRequest, matchConditionEvalErrors: matchConditionEvalError}
+ legacyregistry.MustRegister(matchConditionExclusions)
+ return &AdmissionMetrics{step: step, controller: controller, webhook: webhook, webhookRejection: webhookRejection, webhookFailOpen: webhookFailOpen, webhookRequest: webhookRequest, matchConditionEvalErrors: matchConditionEvalError, matchConditionExclusions: matchConditionExclusions, matchConditionEvaluationSeconds: matchConditionEvaluationSeconds}
}
func (m *AdmissionMetrics) reset() {
@@ -280,8 +311,18 @@ func (m *AdmissionMetrics) ObserveWebhookFailOpen(ctx context.Context, name, ste
}
// ObserveMatchConditionEvalError records validating or mutating webhook that are not called due to match conditions
-func (m *AdmissionMetrics) ObserveMatchConditionEvalError(ctx context.Context, name, stepType string) {
- m.matchConditionEvalErrors.WithContext(ctx).WithLabelValues(name, stepType).Inc()
+func (m *AdmissionMetrics) ObserveMatchConditionEvalError(ctx context.Context, name, kind, stepType, operation string) {
+ m.matchConditionEvalErrors.WithContext(ctx).WithLabelValues(name, kind, stepType, operation).Inc()
+}
+
+// ObserveMatchConditionExclusion records validating or mutating webhooks that are not called due to match conditions.
+func (m *AdmissionMetrics) ObserveMatchConditionExclusion(ctx context.Context, name, kind, stepType, operation string) {
+ m.matchConditionExclusions.WithContext(ctx).WithLabelValues(name, kind, stepType, operation).Inc()
+}
+
+// ObserveMatchConditionEvaluationTime records duration of match condition evaluation process.
+func (m *AdmissionMetrics) ObserveMatchConditionEvaluationTime(ctx context.Context, elapsed time.Duration, name, kind, stepType, operation string) {
+ m.matchConditionEvaluationSeconds.observe(ctx, elapsed, name, kind, stepType, operation)
}
type metricSet struct {
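A hedged sketch of a call site for the observers added above; evaluateMatchConditions, hookName, and attr are assumed helpers/variables, not part of this patch, and Metrics refers to the package-level AdmissionMetrics instance.

	start := time.Now()
	matched, err := evaluateMatchConditions(ctx, attr) // assumed helper
	op := string(attr.GetOperation())
	Metrics.ObserveMatchConditionEvaluationTime(ctx, time.Since(start), hookName, kindWebhook, stepValidate, op)
	if err != nil {
		Metrics.ObserveMatchConditionEvalError(ctx, hookName, kindWebhook, stepValidate, op)
	} else if !matched {
		Metrics.ObserveMatchConditionExclusion(ctx, hookName, kindWebhook, stepValidate, op)
	}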
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go
index bb122de5fa..25ee108ea9 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go
@@ -18,12 +18,13 @@ package cel
import (
"fmt"
- celconfig "k8s.io/apiserver/pkg/apis/cel"
- "sync"
"github.com/google/cel-go/cel"
+ "k8s.io/apimachinery/pkg/util/version"
+ celconfig "k8s.io/apiserver/pkg/apis/cel"
apiservercel "k8s.io/apiserver/pkg/cel"
+ "k8s.io/apiserver/pkg/cel/environment"
"k8s.io/apiserver/pkg/cel/library"
)
@@ -32,108 +33,12 @@ const (
OldObjectVarName = "oldObject"
ParamsVarName = "params"
RequestVarName = "request"
+ NamespaceVarName = "namespaceObject"
AuthorizerVarName = "authorizer"
RequestResourceAuthorizerVarName = "authorizer.requestResource"
+ VariableVarName = "variables"
)
-var (
- initEnvsOnce sync.Once
- initEnvs envs
- initEnvsErr error
-)
-
-func getEnvs() (envs, error) {
- initEnvsOnce.Do(func() {
- requiredVarsEnv, err := buildRequiredVarsEnv()
- if err != nil {
- initEnvsErr = err
- return
- }
-
- initEnvs, err = buildWithOptionalVarsEnvs(requiredVarsEnv)
- if err != nil {
- initEnvsErr = err
- return
- }
- })
- return initEnvs, initEnvsErr
-}
-
-// This is a similar code as in k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go
-// If any changes are made here, consider to make the same changes there as well.
-func buildBaseEnv() (*cel.Env, error) {
- var opts []cel.EnvOption
- opts = append(opts, cel.HomogeneousAggregateLiterals())
- // Validate function declarations once during base env initialization,
- // so they don't need to be evaluated each time a CEL rule is compiled.
- // This is a relatively expensive operation.
- opts = append(opts, cel.EagerlyValidateDeclarations(true), cel.DefaultUTCTimeZone(true))
- opts = append(opts, library.ExtensionLibs...)
-
- return cel.NewEnv(opts...)
-}
-
-func buildRequiredVarsEnv() (*cel.Env, error) {
- baseEnv, err := buildBaseEnv()
- if err != nil {
- return nil, err
- }
- var propDecls []cel.EnvOption
- reg := apiservercel.NewRegistry(baseEnv)
-
- requestType := BuildRequestType()
- rt, err := apiservercel.NewRuleTypes(requestType.TypeName(), requestType, reg)
- if err != nil {
- return nil, err
- }
- if rt == nil {
- return nil, nil
- }
- opts, err := rt.EnvOptions(baseEnv.TypeProvider())
- if err != nil {
- return nil, err
- }
- propDecls = append(propDecls, cel.Variable(ObjectVarName, cel.DynType))
- propDecls = append(propDecls, cel.Variable(OldObjectVarName, cel.DynType))
- propDecls = append(propDecls, cel.Variable(RequestVarName, requestType.CelType()))
-
- opts = append(opts, propDecls...)
- env, err := baseEnv.Extend(opts...)
- if err != nil {
- return nil, err
- }
- return env, nil
-}
-
-type envs map[OptionalVariableDeclarations]*cel.Env
-
-func buildEnvWithVars(baseVarsEnv *cel.Env, options OptionalVariableDeclarations) (*cel.Env, error) {
- var opts []cel.EnvOption
- if options.HasParams {
- opts = append(opts, cel.Variable(ParamsVarName, cel.DynType))
- }
- if options.HasAuthorizer {
- opts = append(opts, cel.Variable(AuthorizerVarName, library.AuthorizerType))
- opts = append(opts, cel.Variable(RequestResourceAuthorizerVarName, library.ResourceCheckType))
- }
- return baseVarsEnv.Extend(opts...)
-}
-
-func buildWithOptionalVarsEnvs(requiredVarsEnv *cel.Env) (envs, error) {
- envs := make(envs, 4) // since the number of variable combinations is small, pre-build a environment for each
- for _, hasParams := range []bool{false, true} {
- for _, hasAuthorizer := range []bool{false, true} {
- opts := OptionalVariableDeclarations{HasParams: hasParams, HasAuthorizer: hasAuthorizer}
- env, err := buildEnvWithVars(requiredVarsEnv, opts)
- if err != nil {
- return nil, err
- }
- envs[opts] = env
- }
- }
- return envs, nil
-}
-
// BuildRequestType generates a DeclType for AdmissionRequest. This may be replaced with a utility that
// converts the native type definition to apiservercel.DeclType once such a utility becomes available.
// The 'uid' field is omitted since it is not needed for in-process admission review.
@@ -181,6 +86,56 @@ func BuildRequestType() *apiservercel.DeclType {
))
}
+// BuildNamespaceType generates a DeclType for Namespace.
+// Certain nested fields in Namespace (e.g. managedFields, ownerReferences etc.) are omitted in the generated DeclType
+// by design.
+func BuildNamespaceType() *apiservercel.DeclType {
+ field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField {
+ return apiservercel.NewDeclField(name, declType, required, nil, nil)
+ }
+ fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField {
+ result := make(map[string]*apiservercel.DeclField, len(fields))
+ for _, f := range fields {
+ result[f.Name] = f
+ }
+ return result
+ }
+
+ specType := apiservercel.NewObjectType("kubernetes.NamespaceSpec", fields(
+ field("finalizers", apiservercel.NewListType(apiservercel.StringType, -1), true),
+ ))
+ conditionType := apiservercel.NewObjectType("kubernetes.NamespaceCondition", fields(
+ field("status", apiservercel.StringType, true),
+ field("type", apiservercel.StringType, true),
+ field("lastTransitionTime", apiservercel.TimestampType, true),
+ field("message", apiservercel.StringType, true),
+ field("reason", apiservercel.StringType, true),
+ ))
+ statusType := apiservercel.NewObjectType("kubernetes.NamespaceStatus", fields(
+ field("conditions", apiservercel.NewListType(conditionType, -1), true),
+ field("phase", apiservercel.StringType, true),
+ ))
+ metadataType := apiservercel.NewObjectType("kubernetes.NamespaceMetadata", fields(
+ field("name", apiservercel.StringType, true),
+ field("generateName", apiservercel.StringType, true),
+ field("namespace", apiservercel.StringType, true),
+ field("labels", apiservercel.NewMapType(apiservercel.StringType, apiservercel.StringType, -1), true),
+ field("annotations", apiservercel.NewMapType(apiservercel.StringType, apiservercel.StringType, -1), true),
+ field("UID", apiservercel.StringType, true),
+ field("creationTimestamp", apiservercel.TimestampType, true),
+ field("deletionGracePeriodSeconds", apiservercel.IntType, true),
+ field("deletionTimestamp", apiservercel.TimestampType, true),
+ field("generation", apiservercel.IntType, true),
+ field("resourceVersion", apiservercel.StringType, true),
+ field("finalizers", apiservercel.NewListType(apiservercel.StringType, -1), true),
+ ))
+ return apiservercel.NewObjectType("kubernetes.Namespace", fields(
+ field("metadata", metadataType, true),
+ field("spec", specType, true),
+ field("status", statusType, true),
+ ))
+}
+
// CompilationResult represents a compiled validations expression.
type CompilationResult struct {
Program cel.Program
@@ -188,45 +143,48 @@ type CompilationResult struct {
ExpressionAccessor ExpressionAccessor
}
+// Compiler provides a CEL expression compiler configured with the desired admission related CEL variables and
+// environment mode.
+type Compiler interface {
+ CompileCELExpression(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) CompilationResult
+}
+
+type compiler struct {
+ varEnvs variableDeclEnvs
+}
+
+func NewCompiler(env *environment.EnvSet) Compiler {
+ return &compiler{varEnvs: mustBuildEnvs(env)}
+}
+
+type variableDeclEnvs map[OptionalVariableDeclarations]*environment.EnvSet
+
// CompileCELExpression returns a compiled CEL expression.
// perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input.
-func CompileCELExpression(expressionAccessor ExpressionAccessor, optionalVars OptionalVariableDeclarations, perCallLimit uint64) CompilationResult {
- var env *cel.Env
- envs, err := getEnvs()
- if err != nil {
+func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, options OptionalVariableDeclarations, envType environment.Type) CompilationResult {
+ resultError := func(errorString string, errType apiservercel.ErrorType) CompilationResult {
return CompilationResult{
Error: &apiservercel.Error{
- Type: apiservercel.ErrorTypeInternal,
- Detail: "compiler initialization failed: " + err.Error(),
+ Type: errType,
+ Detail: errorString,
},
ExpressionAccessor: expressionAccessor,
}
}
- env, ok := envs[optionalVars]
- if !ok {
- return CompilationResult{
- Error: &apiservercel.Error{
- Type: apiservercel.ErrorTypeInvalid,
- Detail: fmt.Sprintf("compiler initialization failed: failed to load environment for %v", optionalVars),
- },
- ExpressionAccessor: expressionAccessor,
- }
+
+ env, err := c.varEnvs[options].Env(envType)
+ if err != nil {
+ return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal)
}
ast, issues := env.Compile(expressionAccessor.GetExpression())
if issues != nil {
- return CompilationResult{
- Error: &apiservercel.Error{
- Type: apiservercel.ErrorTypeInvalid,
- Detail: "compilation failed: " + issues.String(),
- },
- ExpressionAccessor: expressionAccessor,
- }
+ return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid)
}
found := false
returnTypes := expressionAccessor.ReturnTypes()
for _, returnType := range returnTypes {
- if ast.OutputType() == returnType {
+ if ast.OutputType() == returnType || cel.AnyType == returnType {
found = true
break
}
@@ -239,43 +197,64 @@ func CompileCELExpression(expressionAccessor ExpressionAccessor, optionalVars Op
reason = fmt.Sprintf("must evaluate to one of %v", returnTypes)
}
- return CompilationResult{
- Error: &apiservercel.Error{
- Type: apiservercel.ErrorTypeInvalid,
- Detail: reason,
- },
- ExpressionAccessor: expressionAccessor,
- }
+ return resultError(reason, apiservercel.ErrorTypeInvalid)
}
_, err = cel.AstToCheckedExpr(ast)
if err != nil {
// should be impossible since env.Compile returned no issues
- return CompilationResult{
- Error: &apiservercel.Error{
- Type: apiservercel.ErrorTypeInternal,
- Detail: "unexpected compilation error: " + err.Error(),
- },
- ExpressionAccessor: expressionAccessor,
- }
+ return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal)
}
prog, err := env.Program(ast,
- cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost),
- cel.OptimizeRegex(library.ExtensionLibRegexOptimizations...),
cel.InterruptCheckFrequency(celconfig.CheckFrequency),
- cel.CostLimit(perCallLimit),
)
if err != nil {
- return CompilationResult{
- Error: &apiservercel.Error{
- Type: apiservercel.ErrorTypeInvalid,
- Detail: "program instantiation failed: " + err.Error(),
- },
- ExpressionAccessor: expressionAccessor,
- }
+ return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal)
}
return CompilationResult{
Program: prog,
ExpressionAccessor: expressionAccessor,
}
}
+
+func mustBuildEnvs(baseEnv *environment.EnvSet) variableDeclEnvs {
+ requestType := BuildRequestType()
+ namespaceType := BuildNamespaceType()
+ envs := make(variableDeclEnvs, 4) // since the number of variable combinations is small, pre-build an environment for each
+ for _, hasParams := range []bool{false, true} {
+ for _, hasAuthorizer := range []bool{false, true} {
+ var envOpts []cel.EnvOption
+ if hasParams {
+ envOpts = append(envOpts, cel.Variable(ParamsVarName, cel.DynType))
+ }
+ if hasAuthorizer {
+ envOpts = append(envOpts,
+ cel.Variable(AuthorizerVarName, library.AuthorizerType),
+ cel.Variable(RequestResourceAuthorizerVarName, library.ResourceCheckType))
+ }
+ envOpts = append(envOpts,
+ cel.Variable(ObjectVarName, cel.DynType),
+ cel.Variable(OldObjectVarName, cel.DynType),
+ cel.Variable(NamespaceVarName, namespaceType.CelType()),
+ cel.Variable(RequestVarName, requestType.CelType()))
+
+ extended, err := baseEnv.Extend(
+ environment.VersionedOptions{
+ // Feature epoch was actually 1.26, but we artificially set it to 1.0 because these
+ // options should always be present.
+ IntroducedVersion: version.MajorMinor(1, 0),
+ EnvOptions: envOpts,
+ DeclTypes: []*apiservercel.DeclType{
+ namespaceType,
+ requestType,
+ },
+ },
+ )
+ if err != nil {
+ panic(fmt.Sprintf("environment misconfigured: %v", err))
+ }
+ envs[OptionalVariableDeclarations{HasParams: hasParams, HasAuthorizer: hasAuthorizer}] = extended
+ }
+ }
+ return envs
+}
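A minimal usage sketch of the new per-combination environments, assuming the MustBaseEnvSet/DefaultCompatibilityVersion helpers from k8s.io/apiserver/pkg/cel/environment are available; exprAccessor and exampleCompile are hypothetical names for illustration only, not part of this patch:

// exprAccessor is a hypothetical ExpressionAccessor used only for illustration.
type exprAccessor struct{ expr string }

func (e *exprAccessor) GetExpression() string    { return e.expr }
func (e *exprAccessor) ReturnTypes() []*cel.Type { return []*cel.Type{cel.BoolType} }

func exampleCompile() {
	// Assumed helpers from k8s.io/apiserver/pkg/cel/environment.
	base := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())
	c := NewCompiler(base)
	result := c.CompileCELExpression(
		&exprAccessor{expr: "object.spec.replicas <= 5"},
		OptionalVariableDeclarations{HasParams: false, HasAuthorizer: false},
		environment.StoredExpressions, // NewExpressions is the stricter mode for newly written expressions
	)
	if result.Error != nil {
		return // result.Error.Type is ErrorTypeInvalid or ErrorTypeInternal
	}
	_ = result.Program // evaluated later by the filter against an activation
}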
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go
new file mode 100644
index 0000000000..38b80a304a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+ "context"
+ "math"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+
+ v1 "k8s.io/api/admission/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/version"
+ "k8s.io/apiserver/pkg/admission"
+ apiservercel "k8s.io/apiserver/pkg/cel"
+ "k8s.io/apiserver/pkg/cel/environment"
+ "k8s.io/apiserver/pkg/cel/lazy"
+)
+
+const VariablesTypeName = "kubernetes.variables"
+
+type CompositedCompiler struct {
+ Compiler
+ FilterCompiler
+
+ CompositionEnv *CompositionEnv
+}
+
+type CompositedFilter struct {
+ Filter
+
+ compositionEnv *CompositionEnv
+}
+
+func NewCompositedCompiler(envSet *environment.EnvSet) (*CompositedCompiler, error) {
+ compositionContext, err := NewCompositionEnv(VariablesTypeName, envSet)
+ if err != nil {
+ return nil, err
+ }
+ compiler := NewCompiler(compositionContext.EnvSet)
+ filterCompiler := NewFilterCompiler(compositionContext.EnvSet)
+ return &CompositedCompiler{
+ Compiler: compiler,
+ FilterCompiler: filterCompiler,
+ CompositionEnv: compositionContext,
+ }, nil
+}
+
+func (c *CompositedCompiler) CompileAndStoreVariables(variables []NamedExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) {
+ for _, v := range variables {
+ _ = c.CompileAndStoreVariable(v, options, mode)
+ }
+}
+
+func (c *CompositedCompiler) CompileAndStoreVariable(variable NamedExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) CompilationResult {
+ c.CompositionEnv.AddField(variable.GetName())
+ result := c.Compiler.CompileCELExpression(variable, options, mode)
+ c.CompositionEnv.CompiledVariables[variable.GetName()] = result
+ return result
+}
+
+func (c *CompositedCompiler) Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) Filter {
+ filter := c.FilterCompiler.Compile(expressions, optionalDecls, envType)
+ return &CompositedFilter{
+ Filter: filter,
+ compositionEnv: c.CompositionEnv,
+ }
+}
+
+type CompositionEnv struct {
+ *environment.EnvSet
+
+ MapType *apiservercel.DeclType
+ CompiledVariables map[string]CompilationResult
+}
+
+func (c *CompositionEnv) AddField(name string) {
+ c.MapType.Fields[name] = apiservercel.NewDeclField(name, apiservercel.DynType, true, nil, nil)
+}
+
+func NewCompositionEnv(typeName string, baseEnvSet *environment.EnvSet) (*CompositionEnv, error) {
+ declType := apiservercel.NewObjectType(typeName, map[string]*apiservercel.DeclField{})
+ envSet, err := baseEnvSet.Extend(environment.VersionedOptions{
+ // set to 1.0 because composition is one of the fundamental components
+ IntroducedVersion: version.MajorMinor(1, 0),
+ EnvOptions: []cel.EnvOption{
+ cel.Variable("variables", declType.CelType()),
+ },
+ DeclTypes: []*apiservercel.DeclType{
+ declType,
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &CompositionEnv{
+ MapType: declType,
+ EnvSet: envSet,
+ CompiledVariables: map[string]CompilationResult{},
+ }, nil
+}
+
+func (c *CompositionEnv) CreateContext(parent context.Context) CompositionContext {
+ return &compositionContext{
+ Context: parent,
+ compositionEnv: c,
+ }
+}
+
+type CompositionContext interface {
+ context.Context
+ Variables(activation any) ref.Val
+ GetAndResetCost() int64
+}
+
+type compositionContext struct {
+ context.Context
+
+ compositionEnv *CompositionEnv
+ accumulatedCost int64
+}
+
+func (c *compositionContext) Variables(activation any) ref.Val {
+ lazyMap := lazy.NewMapValue(c.compositionEnv.MapType)
+ for name, result := range c.compositionEnv.CompiledVariables {
+ accessor := &variableAccessor{
+ name: name,
+ result: result,
+ activation: activation,
+ context: c,
+ }
+ lazyMap.Append(name, accessor.Callback)
+ }
+ return lazyMap
+}
+
+func (f *CompositedFilter) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, namespace *corev1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) {
+ ctx = f.compositionEnv.CreateContext(ctx)
+ return f.Filter.ForInput(ctx, versionedAttr, request, optionalVars, namespace, runtimeCELCostBudget)
+}
+
+func (c *compositionContext) reportCost(cost int64) {
+ c.accumulatedCost += cost
+}
+
+func (c *compositionContext) GetAndResetCost() int64 {
+ cost := c.accumulatedCost
+ c.accumulatedCost = 0
+ return cost
+}
+
+type variableAccessor struct {
+ name string
+ result CompilationResult
+ activation any
+ context *compositionContext
+}
+
+func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val {
+ if a.result.Error != nil {
+ return types.NewErr("composited variable %q fails to compile: %v", a.name, a.result.Error)
+ }
+
+ v, details, err := a.result.Program.Eval(a.activation)
+ if details == nil {
+ return types.NewErr("unable to get evaluation details of variable %q", a.name)
+ }
+ costPtr := details.ActualCost()
+ if costPtr == nil {
+ return types.NewErr("unable to calculate cost of variable %q", a.name)
+ }
+ cost := int64(*costPtr)
+ if *costPtr > math.MaxInt64 {
+ cost = math.MaxInt64
+ }
+ a.context.reportCost(cost)
+
+ if err != nil {
+ return types.NewErr("composited variable %q fails to evaluate: %v", a.name, err)
+ }
+ return v
+}
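A rough sketch (same package, illustrative only; namedExpr and exampleComposition are hypothetical) of how the composition pieces fit together: variables are compiled and stored first so that later expressions can reference them as variables.<name>, and the returned filter wraps evaluation in a CompositionContext:

// namedExpr is a hypothetical NamedExpressionAccessor used only for illustration.
type namedExpr struct{ name, expr string }

func (n *namedExpr) GetName() string          { return n.name }
func (n *namedExpr) GetExpression() string    { return n.expr }
func (n *namedExpr) ReturnTypes() []*cel.Type { return []*cel.Type{cel.AnyType} }

func exampleComposition(base *environment.EnvSet) (Filter, error) {
	cc, err := NewCompositedCompiler(base)
	if err != nil {
		return nil, err
	}
	opts := OptionalVariableDeclarations{HasParams: true, HasAuthorizer: false}

	// Compile and store the composited variables first; each one becomes a
	// lazily evaluated field of the kubernetes.variables object.
	cc.CompileAndStoreVariables([]NamedExpressionAccessor{
		&namedExpr{name: "isProd", expr: "object.metadata.labels['env'] == 'prod'"},
	}, opts, environment.StoredExpressions)

	// Later expressions may reference variables.isProd; the returned filter is a
	// CompositedFilter whose ForInput installs a CompositionContext so the
	// variable's evaluation cost is charged against the runtime budget.
	filter := cc.Compile([]ExpressionAccessor{
		&namedExpr{expr: "!variables.isProd || object.spec.replicas >= 2"},
	}, opts, environment.StoredExpressions)
	return filter, nil
}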
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go
index 6e504897c5..3e2a63e75c 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/filter.go
@@ -27,24 +27,27 @@ import (
admissionv1 "k8s.io/api/admission/v1"
authenticationv1 "k8s.io/api/authentication/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/cel"
+ "k8s.io/apiserver/pkg/cel/environment"
"k8s.io/apiserver/pkg/cel/library"
)
// filterCompiler implement the interface FilterCompiler.
type filterCompiler struct {
+ compiler Compiler
}
-func NewFilterCompiler() FilterCompiler {
- return &filterCompiler{}
+func NewFilterCompiler(env *environment.EnvSet) FilterCompiler {
+ return &filterCompiler{compiler: NewCompiler(env)}
}
type evaluationActivation struct {
- object, oldObject, params, request, authorizer, requestResourceAuthorizer interface{}
+ object, oldObject, params, request, namespace, authorizer, requestResourceAuthorizer, variables interface{}
}
// ResolveName returns a value from the activation by qualified name, or false if the name
@@ -59,10 +62,14 @@ func (a *evaluationActivation) ResolveName(name string) (interface{}, bool) {
return a.params, true // params may be null
case RequestVarName:
return a.request, true
+ case NamespaceVarName:
+ return a.namespace, true
case AuthorizerVarName:
return a.authorizer, a.authorizer != nil
case RequestResourceAuthorizerVarName:
return a.requestResourceAuthorizer, a.requestResourceAuthorizer != nil
+ case VariableVarName: // variables always present
+ return a.variables, true
default:
return nil, false
}
@@ -75,13 +82,13 @@ func (a *evaluationActivation) Parent() interpreter.Activation {
}
// Compile compiles the cel expressions defined in the ExpressionAccessors into a Filter
-func (c *filterCompiler) Compile(expressionAccessors []ExpressionAccessor, options OptionalVariableDeclarations, perCallLimit uint64) Filter {
+func (c *filterCompiler) Compile(expressionAccessors []ExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) Filter {
compilationResults := make([]CompilationResult, len(expressionAccessors))
for i, expressionAccessor := range expressionAccessors {
if expressionAccessor == nil {
continue
}
- compilationResults[i] = CompileCELExpression(expressionAccessor, options, perCallLimit)
+ compilationResults[i] = c.compiler.CompileCELExpression(expressionAccessor, options, mode)
}
return NewFilter(compilationResults)
}
@@ -122,7 +129,7 @@ func objectToResolveVal(r runtime.Object) (interface{}, error) {
// ForInput evaluates the compiled CEL expressions converting them into CELEvaluations
// errors per evaluation are returned on the Evaluation object
// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input.
-func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *admissionv1.AdmissionRequest, inputs OptionalVariableBindings, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) {
+func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *admissionv1.AdmissionRequest, inputs OptionalVariableBindings, namespace *v1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error) {
// TODO: replace unstructured with ref.Val for CEL variables when native type support is available
evaluations := make([]EvaluationResult, len(f.compilationResults))
var err error
@@ -152,15 +159,28 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione
if err != nil {
return nil, -1, err
}
+ namespaceVal, err := objectToResolveVal(namespace)
+ if err != nil {
+ return nil, -1, err
+ }
va := &evaluationActivation{
object: objectVal,
oldObject: oldObjectVal,
params: paramsVal,
request: requestVal.Object,
+ namespace: namespaceVal,
authorizer: authorizerVal,
requestResourceAuthorizer: requestResourceAuthorizerVal,
}
+ // Composition is an optional feature that only applies to ValidatingAdmissionPolicy.
+ // Check whether the context allows composition.
+ var compositionCtx CompositionContext
+ var ok bool
+ if compositionCtx, ok = ctx.(CompositionContext); ok {
+ va.variables = compositionCtx.Variables(va)
+ }
+
remainingBudget := runtimeCELCostBudget
for i, compilationResult := range f.compilationResults {
var evaluation = &evaluations[i]
@@ -184,6 +204,17 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione
}
t1 := time.Now()
evalResult, evalDetails, err := compilationResult.Program.ContextEval(ctx, va)
+ // budget may be spent due to lazy evaluation of composited variables
+ if compositionCtx != nil {
+ compositionCost := compositionCtx.GetAndResetCost()
+ if compositionCost > remainingBudget {
+ return nil, -1, &cel.Error{
+ Type: cel.ErrorTypeInvalid,
+ Detail: "validation failed due to running out of cost budget, no further validation rules will be run",
+ }
+ }
+ remainingBudget -= compositionCost
+ }
elapsed := time.Since(t1)
evaluation.Elapsed = elapsed
if evalDetails == nil {
@@ -222,10 +253,13 @@ func (f *filter) ForInput(ctx context.Context, versionedAttr *admission.Versione
}
// TODO: to reuse https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/request/admissionreview.go#L154
-func CreateAdmissionRequest(attr admission.Attributes) *admissionv1.AdmissionRequest {
- // FIXME: how to get resource GVK, GVR and subresource?
- gvk := attr.GetKind()
- gvr := attr.GetResource()
+func CreateAdmissionRequest(attr admission.Attributes, equivalentGVR metav1.GroupVersionResource, equivalentKind metav1.GroupVersionKind) *admissionv1.AdmissionRequest {
+ // Attempting to use same logic as webhook for constructing resource
+ // GVK, GVR, subresource
+ // Use the GVK, GVR that the matcher decided was equivalent to that of the request
+ // https://github.com/kubernetes/kubernetes/blob/90c362b3430bcbbf8f245fadbcd521dab39f1d7c/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go#L182-L210
+ gvk := equivalentKind
+ gvr := equivalentGVR
subresource := attr.GetSubresource()
requestGVK := attr.GetKind()
@@ -284,6 +318,33 @@ func CreateAdmissionRequest(attr admission.Attributes) *admissionv1.AdmissionReq
}
}
+// CreateNamespaceObject creates a Namespace object that is suitable for the CEL evaluation.
+// If the namespace is nil, CreateNamespaceObject returns nil
+func CreateNamespaceObject(namespace *v1.Namespace) *v1.Namespace {
+ if namespace == nil {
+ return nil
+ }
+
+ return &v1.Namespace{
+ Status: namespace.Status,
+ Spec: namespace.Spec,
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace.Name,
+ GenerateName: namespace.GenerateName,
+ Namespace: namespace.Namespace,
+ UID: namespace.UID,
+ ResourceVersion: namespace.ResourceVersion,
+ Generation: namespace.Generation,
+ CreationTimestamp: namespace.CreationTimestamp,
+ DeletionTimestamp: namespace.DeletionTimestamp,
+ DeletionGracePeriodSeconds: namespace.DeletionGracePeriodSeconds,
+ Labels: namespace.Labels,
+ Annotations: namespace.Annotations,
+ Finalizers: namespace.Finalizers,
+ },
+ }
+}
+
// CompilationErrors returns a list of all the errors from the compilation of the evaluator
func (e *filter) CompilationErrors() []error {
compilationErrors := []error{}
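As a small illustration of CreateNamespaceObject (same package; exampleNamespaceForCEL is a hypothetical helper name): only the metadata fields that BuildNamespaceType declares are copied, so fields such as managedFields never reach CEL:

func exampleNamespaceForCEL() {
	ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{
		Name:          "team-a",
		Labels:        map[string]string{"env": "prod"},
		ManagedFields: []metav1.ManagedFieldsEntry{{Manager: "kubectl"}}, // not copied below
	}}
	// Only the metadata fields declared by BuildNamespaceType are carried over;
	// managedFields, ownerReferences, etc. never reach the CEL-facing object.
	celNS := CreateNamespaceObject(ns)
	_ = celNS
}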
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go
index d3c4a0217d..c9f4e63369 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/interface.go
@@ -24,9 +24,11 @@ import (
"github.com/google/cel-go/common/types/ref"
v1 "k8s.io/api/admission/v1"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/apiserver/pkg/cel/environment"
)
type ExpressionAccessor interface {
@@ -34,6 +36,13 @@ type ExpressionAccessor interface {
ReturnTypes() []*cel.Type
}
+// NamedExpressionAccessor extends ExpressionAccessor with a name.
+type NamedExpressionAccessor interface {
+ ExpressionAccessor
+
+ GetName() string // follows the naming convention of ExpressionAccessor
+}
+
// EvaluationResult contains the minimal required fields and metadata of a cel evaluation
type EvaluationResult struct {
EvalResult ref.Val
@@ -57,8 +66,7 @@ type OptionalVariableDeclarations struct {
// FilterCompiler contains a function to assist with converting types and values to/from CEL-typed values.
type FilterCompiler interface {
// Compile is used for the cel expression compilation
- // perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input.
- Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, perCallLimit uint64) Filter
+ Compile(expressions []ExpressionAccessor, optionalDecls OptionalVariableDeclarations, envType environment.Type) Filter
}
// OptionalVariableBindings provides expression bindings for optional CEL variables.
@@ -80,7 +88,7 @@ type Filter interface {
// ForInput converts compiled CEL-typed values into evaluated CEL-typed value.
// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input.
// If cost budget is calculated, the filter should return the remaining budget.
- ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error)
+ ForInput(ctx context.Context, versionedAttr *admission.VersionedAttributes, request *v1.AdmissionRequest, optionalVars OptionalVariableBindings, namespace *corev1.Namespace, runtimeCELCostBudget int64) ([]EvaluationResult, int64, error)
// CompilationErrors returns a list of errors from the compilation of the evaluator
CompilationErrors() []error
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/admission.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/admission.go
index 9a514b4631..e51bc6e737 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/admission.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/admission.go
@@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apiserver/pkg/authorization/authorizer"
- "k8s.io/apiserver/pkg/cel/openapi/resolver"
"k8s.io/apiserver/pkg/features"
"k8s.io/client-go/dynamic"
"k8s.io/component-base/featuregate"
@@ -74,7 +73,6 @@ type celAdmissionPlugin struct {
dynamicClient dynamic.Interface
stopCh <-chan struct{}
authorizer authorizer.Authorizer
- schemaResolver resolver.SchemaResolver
}
var _ initializer.WantsExternalKubeInformerFactory = &celAdmissionPlugin{}
@@ -83,7 +81,6 @@ var _ initializer.WantsRESTMapper = &celAdmissionPlugin{}
var _ initializer.WantsDynamicClient = &celAdmissionPlugin{}
var _ initializer.WantsDrainedNotification = &celAdmissionPlugin{}
var _ initializer.WantsAuthorizer = &celAdmissionPlugin{}
-var _ initializer.WantsSchemaResolver = &celAdmissionPlugin{}
var _ admission.InitializationValidator = &celAdmissionPlugin{}
var _ admission.ValidationInterface = &celAdmissionPlugin{}
@@ -116,11 +113,6 @@ func (c *celAdmissionPlugin) SetDrainedNotification(stopCh <-chan struct{}) {
func (c *celAdmissionPlugin) SetAuthorizer(authorizer authorizer.Authorizer) {
c.authorizer = authorizer
}
-
-func (c *celAdmissionPlugin) SetSchemaResolver(resolver resolver.SchemaResolver) {
- c.schemaResolver = resolver
-}
-
func (c *celAdmissionPlugin) InspectFeatureGates(featureGates featuregate.FeatureGate) {
if featureGates.Enabled(features.ValidatingAdmissionPolicy) {
c.enabled = true
@@ -154,7 +146,7 @@ func (c *celAdmissionPlugin) ValidateInitialization() error {
if c.authorizer == nil {
return errors.New("missing authorizer")
}
- c.evaluator = NewAdmissionController(c.informerFactory, c.client, c.restMapper, c.schemaResolver /* (optional) */, c.dynamicClient, c.authorizer)
+ c.evaluator = NewAdmissionController(c.informerFactory, c.client, c.restMapper, c.dynamicClient, c.authorizer)
if err := c.evaluator.ValidateInitialization(); err != nil {
return err
}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/caching_authorizer.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/caching_authorizer.go
new file mode 100644
index 0000000000..a295cb30dc
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/caching_authorizer.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validatingadmissionpolicy
+
+import (
+ "context"
+ "encoding/json"
+ "sort"
+ "strings"
+
+ "k8s.io/apiserver/pkg/authentication/user"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+type authzResult struct {
+ authorized authorizer.Decision
+ reason string
+ err error
+}
+
+type cachingAuthorizer struct {
+ authorizer authorizer.Authorizer
+ decisions map[string]authzResult
+}
+
+func newCachingAuthorizer(in authorizer.Authorizer) authorizer.Authorizer {
+ return &cachingAuthorizer{
+ authorizer: in,
+ decisions: make(map[string]authzResult),
+ }
+}
+
+// The attribute accessors known to cache key construction. If this fails to compile, the cache
+// implementation may need to be updated.
+var _ authorizer.Attributes = (interface {
+ GetUser() user.Info
+ GetVerb() string
+ IsReadOnly() bool
+ GetNamespace() string
+ GetResource() string
+ GetSubresource() string
+ GetName() string
+ GetAPIGroup() string
+ GetAPIVersion() string
+ IsResourceRequest() bool
+ GetPath() string
+})(nil)
+
+// The user info accessors known to cache key construction. If this fails to compile, the cache
+// implementation may need to be updated.
+var _ user.Info = (interface {
+ GetName() string
+ GetUID() string
+ GetGroups() []string
+ GetExtra() map[string][]string
+})(nil)
+
+// Authorize returns an authorization decision by delegating to another Authorizer. If an equivalent
+// check has already been performed, a cached result is returned. Not safe for concurrent use.
+func (ca *cachingAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
+ serializableAttributes := authorizer.AttributesRecord{
+ Verb: a.GetVerb(),
+ Namespace: a.GetNamespace(),
+ APIGroup: a.GetAPIGroup(),
+ APIVersion: a.GetAPIVersion(),
+ Resource: a.GetResource(),
+ Subresource: a.GetSubresource(),
+ Name: a.GetName(),
+ ResourceRequest: a.IsResourceRequest(),
+ Path: a.GetPath(),
+ }
+
+ if u := a.GetUser(); u != nil {
+ di := &user.DefaultInfo{
+ Name: u.GetName(),
+ UID: u.GetUID(),
+ }
+
+ // Differently-ordered groups or extras could cause otherwise-equivalent checks to
+ // have distinct cache keys.
+ if groups := u.GetGroups(); len(groups) > 0 {
+ di.Groups = make([]string, len(groups))
+ copy(di.Groups, groups)
+ sort.Strings(di.Groups)
+ }
+
+ if extra := u.GetExtra(); len(extra) > 0 {
+ di.Extra = make(map[string][]string, len(extra))
+ for k, vs := range extra {
+ vdupe := make([]string, len(vs))
+ copy(vdupe, vs)
+ sort.Strings(vdupe)
+ di.Extra[k] = vdupe
+ }
+ }
+
+ serializableAttributes.User = di
+ }
+
+ var b strings.Builder
+ if err := json.NewEncoder(&b).Encode(serializableAttributes); err != nil {
+ return authorizer.DecisionNoOpinion, "", err
+ }
+ key := b.String()
+
+ if cached, ok := ca.decisions[key]; ok {
+ return cached.authorized, cached.reason, cached.err
+ }
+
+ authorized, reason, err := ca.authorizer.Authorize(ctx, a)
+
+ ca.decisions[key] = authzResult{
+ authorized: authorized,
+ reason: reason,
+ err: err,
+ }
+
+ return authorized, reason, err
+}
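A minimal sketch of the intended caching behavior, assuming the same package; countingAuthorizer and exampleCachingAuthorizer are illustrative only:

// countingAuthorizer is a hypothetical delegate used only for illustration.
type countingAuthorizer struct{ calls int }

func (c *countingAuthorizer) Authorize(_ context.Context, _ authorizer.Attributes) (authorizer.Decision, string, error) {
	c.calls++
	return authorizer.DecisionAllow, "", nil
}

func exampleCachingAuthorizer(ctx context.Context) {
	delegate := &countingAuthorizer{}
	cached := newCachingAuthorizer(delegate)

	attrs := authorizer.AttributesRecord{
		User:            &user.DefaultInfo{Name: "system:serviceaccount:ns:sa"},
		Verb:            "get",
		Namespace:       "ns",
		APIVersion:      "v1",
		Resource:        "configmaps",
		Name:            "example",
		ResourceRequest: true,
	}

	// Equivalent checks share one cache entry, so the delegate is consulted once.
	_, _, _ = cached.Authorize(ctx, attrs)
	_, _, _ = cached.Authorize(ctx, attrs)
	_ = delegate.calls // 1
}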
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go
index f54f1acb36..46b76e06d5 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller.go
@@ -25,30 +25,29 @@ import (
"sync/atomic"
"time"
- "k8s.io/klog/v2"
-
- "k8s.io/api/admissionregistration/v1alpha1"
+ "k8s.io/api/admissionregistration/v1beta1"
+ v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
utiljson "k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/admission"
celmetrics "k8s.io/apiserver/pkg/admission/cel"
- "k8s.io/apiserver/pkg/admission/plugin/cel"
"k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic"
"k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matching"
celconfig "k8s.io/apiserver/pkg/apis/cel"
"k8s.io/apiserver/pkg/authorization/authorizer"
- "k8s.io/apiserver/pkg/cel/openapi/resolver"
"k8s.io/apiserver/pkg/warning"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
+ "k8s.io/klog/v2"
)
var _ CELPolicyEvaluator = &celAdmissionController{}
@@ -66,22 +65,24 @@ type celAdmissionController struct {
// A snapshot of the current policy configuration is synced with this field
// asynchronously
definitions atomic.Value
+
+ authz authorizer.Authorizer
}
// Everything someone might need to validate a single ValidatingPolicyDefinition
// against all of its registered bindings.
type policyData struct {
definitionInfo
- paramController generic.Controller[runtime.Object]
- bindings []bindingInfo
+ paramInfo
+ bindings []bindingInfo
}
// contains the cel PolicyDecisions along with the ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding
// that determined the decision
type policyDecisionWithMetadata struct {
PolicyDecision
- Definition *v1alpha1.ValidatingAdmissionPolicy
- Binding *v1alpha1.ValidatingAdmissionPolicyBinding
+ Definition *v1beta1.ValidatingAdmissionPolicy
+ Binding *v1beta1.ValidatingAdmissionPolicyBinding
}
// namespaceName is used as a key in definitionInfo and bindingInfos
@@ -97,7 +98,7 @@ type definitionInfo struct {
// Last value seen by this controller to be used in policy enforcement
// May not be nil
- lastReconciledValue *v1alpha1.ValidatingAdmissionPolicy
+ lastReconciledValue *v1beta1.ValidatingAdmissionPolicy
}
type bindingInfo struct {
@@ -106,7 +107,7 @@ type bindingInfo struct {
// Last value seen by this controller to be used in policy enforcement
// May not be nil
- lastReconciledValue *v1alpha1.ValidatingAdmissionPolicyBinding
+ lastReconciledValue *v1beta1.ValidatingAdmissionPolicyBinding
}
type paramInfo struct {
@@ -116,6 +117,9 @@ type paramInfo struct {
// Function to call to stop the informer and clean up the controller
stop func()
+ // Whether this param is cluster or namespace scoped
+ scope meta.RESTScope
+
// Policy Definitions which refer to this param CRD
dependentDefinitions sets.Set[namespacedName]
}
@@ -125,29 +129,24 @@ func NewAdmissionController(
informerFactory informers.SharedInformerFactory,
client kubernetes.Interface,
restMapper meta.RESTMapper,
- schemaResolver resolver.SchemaResolver,
dynamicClient dynamic.Interface,
authz authorizer.Authorizer,
) CELPolicyEvaluator {
- var typeChecker *TypeChecker
- if schemaResolver != nil {
- typeChecker = &TypeChecker{schemaResolver: schemaResolver, restMapper: restMapper}
- }
return &celAdmissionController{
definitions: atomic.Value{},
policyController: newPolicyController(
restMapper,
client,
dynamicClient,
- typeChecker,
- cel.NewFilterCompiler(),
+ informerFactory,
+ nil,
NewMatcher(matching.NewMatcher(informerFactory.Core().V1().Namespaces().Lister(), client)),
- generic.NewInformer[*v1alpha1.ValidatingAdmissionPolicy](
- informerFactory.Admissionregistration().V1alpha1().ValidatingAdmissionPolicies().Informer()),
- generic.NewInformer[*v1alpha1.ValidatingAdmissionPolicyBinding](
- informerFactory.Admissionregistration().V1alpha1().ValidatingAdmissionPolicyBindings().Informer()),
- authz,
+ generic.NewInformer[*v1beta1.ValidatingAdmissionPolicy](
+ informerFactory.Admissionregistration().V1beta1().ValidatingAdmissionPolicies().Informer()),
+ generic.NewInformer[*v1beta1.ValidatingAdmissionPolicyBinding](
+ informerFactory.Admissionregistration().V1beta1().ValidatingAdmissionPolicyBindings().Informer()),
),
+ authz: authz,
}
}
@@ -193,21 +192,21 @@ func (c *celAdmissionController) Validate(
var deniedDecisions []policyDecisionWithMetadata
- addConfigError := func(err error, definition *v1alpha1.ValidatingAdmissionPolicy, binding *v1alpha1.ValidatingAdmissionPolicyBinding) {
+ addConfigError := func(err error, definition *v1beta1.ValidatingAdmissionPolicy, binding *v1beta1.ValidatingAdmissionPolicyBinding) {
// we always default the FailurePolicy if it is unset and validate it in API level
- var policy v1alpha1.FailurePolicyType
+ var policy v1beta1.FailurePolicyType
if definition.Spec.FailurePolicy == nil {
- policy = v1alpha1.Fail
+ policy = v1beta1.Fail
} else {
policy = *definition.Spec.FailurePolicy
}
// apply FailurePolicy specified in ValidatingAdmissionPolicy, the default would be Fail
switch policy {
- case v1alpha1.Ignore:
+ case v1beta1.Ignore:
// TODO: add metrics for ignored error here
return
- case v1alpha1.Fail:
+ case v1beta1.Fail:
var message string
if binding == nil {
message = fmt.Errorf("failed to configure policy: %w", err).Error()
@@ -235,9 +234,17 @@ func (c *celAdmissionController) Validate(
}
policyDatas := c.definitions.Load().([]policyData)
+ authz := newCachingAuthorizer(c.authz)
+
for _, definitionInfo := range policyDatas {
+ // versionedAttributes will be set to non-nil inside of the loop, but
+ // is scoped outside of the param loop so we only convert once. We defer
+ // conversion so that it is only performed when we know a policy matches,
+ // saving the cost of converting non-matching requests.
+ var versionedAttr *admission.VersionedAttributes
+
definition := definitionInfo.lastReconciledValue
- matches, matchKind, err := c.policyController.matcher.DefinitionMatches(a, o, definition)
+ matches, matchResource, matchKind, err := c.policyController.matcher.DefinitionMatches(a, o, definition)
if err != nil {
// Configuration error.
addConfigError(err, definition, nil)
@@ -267,65 +274,13 @@ func (c *celAdmissionController) Validate(
continue
}
- var param runtime.Object
-
- // versionedAttributes will be set to non-nil inside of the loop, but
- // is scoped outside of the param loop so we only convert once. We defer
- // conversion so that it is only performed when we know a policy matches,
- // saving the cost of converting non-matching requests.
- var versionedAttr *admission.VersionedAttributes
-
- // If definition has paramKind, paramRef is required in binding.
- // If definition has no paramKind, paramRef set in binding will be ignored.
- paramKind := definition.Spec.ParamKind
- paramRef := binding.Spec.ParamRef
- if paramKind != nil && paramRef != nil {
- paramController := definitionInfo.paramController
- if paramController == nil {
- addConfigError(fmt.Errorf("paramKind kind `%v` not known",
- paramKind.String()), definition, binding)
- continue
- }
-
- // If the param informer for this admission policy has not yet
- // had time to perform an initial listing, don't attempt to use
- // it.
- timeoutCtx, cancel := context.WithTimeout(c.policyController.context, 1*time.Second)
- defer cancel()
-
- if !cache.WaitForCacheSync(timeoutCtx.Done(), paramController.HasSynced) {
- addConfigError(fmt.Errorf("paramKind kind `%v` not yet synced to use for admission",
- paramKind.String()), definition, binding)
- continue
- }
-
- if len(paramRef.Namespace) == 0 {
- param, err = paramController.Informer().Get(paramRef.Name)
- } else {
- param, err = paramController.Informer().Namespaced(paramRef.Namespace).Get(paramRef.Name)
- }
-
- if err != nil {
- // Apply failure policy
- addConfigError(err, definition, binding)
-
- if k8serrors.IsInvalid(err) {
- // Param mis-configured
- // require to set paramRef.namespace for namespaced resource and unset paramRef.namespace for cluster scoped resource
- continue
- } else if k8serrors.IsNotFound(err) {
- // Param not yet available. User may need to wait a bit
- // before being able to use it for validation.
- continue
- }
-
- // There was a bad internal error
- utilruntime.HandleError(err)
- continue
- }
- }
-
- if versionedAttr == nil {
+ params, err := c.collectParams(definition.Spec.ParamKind, definitionInfo.paramInfo, binding.Spec.ParamRef, a.GetNamespace())
+ if err != nil {
+ addConfigError(err, definition, binding)
+ continue
+ } else if versionedAttr == nil && len(params) > 0 {
+ // As an optimization, versionedAttr creation is deferred until
+ // first use. Since there is at least one param, we will validate.
va, err := admission.NewVersionedAttributes(a, matchKind, o)
if err != nil {
wrappedErr := fmt.Errorf("failed to convert object version: %w", err)
@@ -335,68 +290,98 @@ func (c *celAdmissionController) Validate(
versionedAttr = va
}
- validationResult := bindingInfo.validator.Validate(ctx, versionedAttr, param, celconfig.RuntimeCELCostBudget)
- if err != nil {
- // runtime error. Apply failure policy
- wrappedError := fmt.Errorf("failed to evaluate CEL expression: %w", err)
- addConfigError(wrappedError, definition, binding)
- continue
+ var validationResults []ValidateResult
+ var namespace *v1.Namespace
+ namespaceName := a.GetNamespace()
+
+ // Special case: the namespace object carries its own name as its namespace (maybe a bug).
+ // Unset it if the incoming object is a namespace.
+ if gvk := a.GetKind(); gvk.Kind == "Namespace" && gvk.Version == "v1" && gvk.Group == "" {
+ namespaceName = ""
+ }
+
+ // if it is cluster scoped, namespaceName will be empty
+ // Otherwise, get the Namespace resource.
+ if namespaceName != "" {
+ namespace, err = c.policyController.matcher.GetNamespace(namespaceName)
+ if err != nil {
+ return err
+ }
}
- for i, decision := range validationResult.Decisions {
- switch decision.Action {
- case ActionAdmit:
- if decision.Evaluation == EvalError {
- celmetrics.Metrics.ObserveAdmissionWithError(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
+ for _, param := range params {
+ var p runtime.Object = param
+ if p != nil && p.GetObjectKind().GroupVersionKind().Empty() {
+ // Make sure param has TypeMeta populated
+ // This is a simple hack to make sure typeMeta is
+ // available to CEL without making copies of objects, etc.
+ p = &wrappedParam{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: definition.Spec.ParamKind.APIVersion,
+ Kind: definition.Spec.ParamKind.Kind,
+ },
+ nested: param,
}
- case ActionDeny:
- for _, action := range binding.Spec.ValidationActions {
- switch action {
- case v1alpha1.Deny:
- deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{
- Definition: definition,
- Binding: binding,
- PolicyDecision: decision,
- })
- celmetrics.Metrics.ObserveRejection(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
- case v1alpha1.Audit:
- c.publishValidationFailureAnnotation(binding, i, decision, versionedAttr)
- celmetrics.Metrics.ObserveAudit(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
- case v1alpha1.Warn:
- warning.AddWarning(ctx, "", fmt.Sprintf("Validation failed for ValidatingAdmissionPolicy '%s' with binding '%s': %s", definition.Name, binding.Name, decision.Message))
- celmetrics.Metrics.ObserveWarn(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
+ }
+ validationResults = append(validationResults, bindingInfo.validator.Validate(ctx, matchResource, versionedAttr, p, namespace, celconfig.RuntimeCELCostBudget, authz))
+ }
+
+ for _, validationResult := range validationResults {
+ for i, decision := range validationResult.Decisions {
+ switch decision.Action {
+ case ActionAdmit:
+ if decision.Evaluation == EvalError {
+ celmetrics.Metrics.ObserveAdmissionWithError(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
}
+ case ActionDeny:
+ for _, action := range binding.Spec.ValidationActions {
+ switch action {
+ case v1beta1.Deny:
+ deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{
+ Definition: definition,
+ Binding: binding,
+ PolicyDecision: decision,
+ })
+ celmetrics.Metrics.ObserveRejection(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
+ case v1beta1.Audit:
+ c.publishValidationFailureAnnotation(binding, i, decision, versionedAttr)
+ celmetrics.Metrics.ObserveAudit(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
+ case v1beta1.Warn:
+ warning.AddWarning(ctx, "", fmt.Sprintf("Validation failed for ValidatingAdmissionPolicy '%s' with binding '%s': %s", definition.Name, binding.Name, decision.Message))
+ celmetrics.Metrics.ObserveWarn(ctx, decision.Elapsed, definition.Name, binding.Name, "active")
+ }
+ }
+ default:
+ return fmt.Errorf("unrecognized evaluation decision '%s' for ValidatingAdmissionPolicyBinding '%s' with ValidatingAdmissionPolicy '%s'",
+ decision.Action, binding.Name, definition.Name)
}
- default:
- return fmt.Errorf("unrecognized evaluation decision '%s' for ValidatingAdmissionPolicyBinding '%s' with ValidatingAdmissionPolicy '%s'",
- decision.Action, binding.Name, definition.Name)
}
- }
- for _, auditAnnotation := range validationResult.AuditAnnotations {
- switch auditAnnotation.Action {
- case AuditAnnotationActionPublish:
- value := auditAnnotation.Value
- if len(auditAnnotation.Value) > maxAuditAnnotationValueLength {
- value = value[:maxAuditAnnotationValueLength]
+ for _, auditAnnotation := range validationResult.AuditAnnotations {
+ switch auditAnnotation.Action {
+ case AuditAnnotationActionPublish:
+ value := auditAnnotation.Value
+ if len(auditAnnotation.Value) > maxAuditAnnotationValueLength {
+ value = value[:maxAuditAnnotationValueLength]
+ }
+ auditAnnotationCollector.add(auditAnnotation.Key, value)
+ case AuditAnnotationActionError:
+ // When failurePolicy=fail, audit annotation errors result in deny
+ deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{
+ Definition: definition,
+ Binding: binding,
+ PolicyDecision: PolicyDecision{
+ Action: ActionDeny,
+ Evaluation: EvalError,
+ Message: auditAnnotation.Error,
+ Elapsed: auditAnnotation.Elapsed,
+ },
+ })
+ celmetrics.Metrics.ObserveRejection(ctx, auditAnnotation.Elapsed, definition.Name, binding.Name, "active")
+ case AuditAnnotationActionExclude: // skip it
+ default:
+ return fmt.Errorf("unsupported AuditAnnotation Action: %s", auditAnnotation.Action)
}
- auditAnnotationCollector.add(auditAnnotation.Key, value)
- case AuditAnnotationActionError:
- // When failurePolicy=fail, audit annotation errors result in deny
- deniedDecisions = append(deniedDecisions, policyDecisionWithMetadata{
- Definition: definition,
- Binding: binding,
- PolicyDecision: PolicyDecision{
- Action: ActionDeny,
- Evaluation: EvalError,
- Message: auditAnnotation.Error,
- Elapsed: auditAnnotation.Elapsed,
- },
- })
- celmetrics.Metrics.ObserveRejection(ctx, auditAnnotation.Elapsed, definition.Name, binding.Name, "active")
- case AuditAnnotationActionExclude: // skip it
- default:
- return fmt.Errorf("unsupported AuditAnnotation Action: %s", auditAnnotation.Action)
}
}
}
@@ -425,7 +410,124 @@ func (c *celAdmissionController) Validate(
return nil
}
-func (c *celAdmissionController) publishValidationFailureAnnotation(binding *v1alpha1.ValidatingAdmissionPolicyBinding, expressionIndex int, decision PolicyDecision, attributes admission.Attributes) {
+// collectParams returns the objects to use when evaluating the policy.
+func (c *celAdmissionController) collectParams(
+ paramKind *v1beta1.ParamKind,
+ info paramInfo,
+ paramRef *v1beta1.ParamRef,
+ namespace string,
+) ([]runtime.Object, error) {
+ // If definition has paramKind, paramRef is required in binding.
+ // If definition has no paramKind, paramRef set in binding will be ignored.
+ var params []runtime.Object
+ var paramStore generic.NamespacedLister[runtime.Object]
+
+ // Make sure the param kind is ready to use
+ if paramKind != nil && paramRef != nil {
+ if info.controller == nil {
+ return nil, fmt.Errorf("paramKind kind `%v` not known",
+ paramKind.String())
+ }
+
+ // Set up cluster-scoped or namespaced access to the params,
+ // defaulting to the request namespace when paramRef.namespace is unset and paramKind is namespaced
+ paramStore = info.controller.Informer()
+ if info.scope.Name() == meta.RESTScopeNameNamespace {
+ paramsNamespace := namespace
+ if len(paramRef.Namespace) > 0 {
+ paramsNamespace = paramRef.Namespace
+ } else if len(paramsNamespace) == 0 {
+ // You must supply namespace if your matcher can possibly
+ // match a cluster-scoped resource
+ return nil, fmt.Errorf("cannot use namespaced paramRef in policy binding that matches cluster-scoped resources")
+ }
+
+ paramStore = info.controller.Informer().Namespaced(paramsNamespace)
+ }
+
+ // If the param informer for this admission policy has not yet
+ // had time to perform an initial listing, don't attempt to use
+ // it.
+ timeoutCtx, cancel := context.WithTimeout(c.policyController.context, 1*time.Second)
+ defer cancel()
+
+ if !cache.WaitForCacheSync(timeoutCtx.Done(), info.controller.HasSynced) {
+ return nil, fmt.Errorf("paramKind kind `%v` not yet synced to use for admission",
+ paramKind.String())
+ }
+ }
+
+ // Find params to use with policy
+ switch {
+ case paramKind == nil:
+ // ParamKind is unset. Ignore any globalParamRef or namespaceParamRef
+ // setting.
+ return []runtime.Object{nil}, nil
+ case paramRef == nil:
+ // Policy ParamKind is set, but binding does not use it.
+ // Validate with nil params
+ return []runtime.Object{nil}, nil
+ case len(paramRef.Namespace) > 0 && info.scope.Name() == meta.RESTScopeRoot.Name():
+ // Not allowed to set namespace for cluster-scoped param
+ return nil, fmt.Errorf("paramRef.namespace must not be provided for a cluster-scoped `paramKind`")
+
+ case len(paramRef.Name) > 0:
+ if paramRef.Selector != nil {
+ // This should be validated, but just in case.
+ return nil, fmt.Errorf("paramRef.name and paramRef.selector are mutually exclusive")
+ }
+
+ switch param, err := paramStore.Get(paramRef.Name); {
+ case err == nil:
+ params = []runtime.Object{param}
+ case k8serrors.IsNotFound(err):
+ // Param not yet available. User may need to wait a bit
+ // before being able to use it for validation.
+ //
+ // Set params to nil to prepare for not found action
+ params = nil
+ case k8serrors.IsInvalid(err):
+ // Param mis-configured
+ // require to set namespace for namespaced resource
+ // and unset namespace for cluster scoped resource
+ return nil, err
+ default:
+ // Internal error
+ utilruntime.HandleError(err)
+ return nil, err
+ }
+ case paramRef.Selector != nil:
+ // Select everything by default if empty name and selector
+ selector, err := metav1.LabelSelectorAsSelector(paramRef.Selector)
+ if err != nil {
+ // Cannot parse label selector: configuration error
+ return nil, err
+
+ }
+
+ paramList, err := paramStore.List(selector)
+ if err != nil {
+ // There was a bad internal error
+ utilruntime.HandleError(err)
+ return nil, err
+ }
+
+ // Successfully grabbed params
+ params = paramList
+ default:
+ // Should be unreachable due to validation
+ return nil, fmt.Errorf("one of name or selector must be provided")
+ }
+
+ // Apply fail action for params not found case
+ if len(params) == 0 && paramRef.ParameterNotFoundAction != nil && *paramRef.ParameterNotFoundAction == v1beta1.DenyAction {
+ return nil, errors.New("no params found for policy binding with `Deny` parameterNotFoundAction")
+ }
+
+ return params, nil
+}
+
+func (c *celAdmissionController) publishValidationFailureAnnotation(binding *v1beta1.ValidatingAdmissionPolicyBinding, expressionIndex int, decision PolicyDecision, attributes admission.Attributes) {
key := "validation.policy.admission.k8s.io/validation_failure"
// Marshal to a list of failures since, in the future, we may need to support multiple failures
valueJson, err := utiljson.Marshal([]validationFailureValue{{
@@ -459,11 +561,11 @@ func (c *celAdmissionController) refreshPolicies() {
// validationFailureValue defines the JSON format of a "validation.policy.admission.k8s.io/validation_failure" audit
// annotation value.
type validationFailureValue struct {
- Message string `json:"message"`
- Policy string `json:"policy"`
- Binding string `json:"binding"`
- ExpressionIndex int `json:"expressionIndex"`
- ValidationActions []v1alpha1.ValidationAction `json:"validationActions"`
+ Message string `json:"message"`
+ Policy string `json:"policy"`
+ Binding string `json:"binding"`
+ ExpressionIndex int `json:"expressionIndex"`
+ ValidationActions []v1beta1.ValidationAction `json:"validationActions"`
}
type auditAnnotationCollector struct {
@@ -500,3 +602,48 @@ func (a auditAnnotationCollector) publish(policyName string, attributes admissio
}
}
}
+
+// A workaround for the fact that native types do not have TypeMeta populated, which
+// is needed for CEL expressions to be able to access the value.
+type wrappedParam struct {
+ metav1.TypeMeta
+ nested runtime.Object
+}
+
+func (w *wrappedParam) MarshalJSON() ([]byte, error) {
+ return nil, errors.New("MarshalJSON unimplemented for wrappedParam")
+}
+
+func (w *wrappedParam) UnmarshalJSON(data []byte) error {
+ return errors.New("UnmarshalJSON unimplemented for wrappedParam")
+}
+
+func (w *wrappedParam) ToUnstructured() interface{} {
+ res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(w.nested)
+
+ if err != nil {
+ return nil
+ }
+
+ metaRes, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&w.TypeMeta)
+ if err != nil {
+ return nil
+ }
+
+ for k, v := range metaRes {
+ res[k] = v
+ }
+
+ return res
+}
+
+func (w *wrappedParam) DeepCopyObject() runtime.Object {
+ return &wrappedParam{
+ TypeMeta: w.TypeMeta,
+ nested: w.nested.DeepCopyObject(),
+ }
+}
+
+func (w *wrappedParam) GetObjectKind() schema.ObjectKind {
+ return w
+}
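An illustrative sketch of why wrappedParam exists, assuming the same package and its v1/v1beta1/metav1 imports; exampleWrappedParam and the choice of a ConfigMap as the param kind are hypothetical:

func exampleWrappedParam(cm *v1.ConfigMap, paramKind *v1beta1.ParamKind) interface{} {
	// Typed informers return native objects with empty TypeMeta, so inject the
	// policy's paramKind before handing the object to CEL.
	wrapped := &wrappedParam{
		TypeMeta: metav1.TypeMeta{APIVersion: paramKind.APIVersion, Kind: paramKind.Kind},
		nested:   cm,
	}
	// ToUnstructured merges apiVersion/kind into the nested object's
	// unstructured form so expressions can read params.apiVersion and params.kind.
	return wrapped.ToUnstructured()
}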
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go
index 296ac416aa..b2624694c8 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go
@@ -23,11 +23,10 @@ import (
"time"
v1 "k8s.io/api/admissionregistration/v1"
- "k8s.io/api/admissionregistration/v1alpha1"
+ "k8s.io/api/admissionregistration/v1beta1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -36,13 +35,11 @@ import (
"k8s.io/apiserver/pkg/admission/plugin/cel"
"k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/internal/generic"
"k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions"
- celconfig "k8s.io/apiserver/pkg/apis/cel"
- "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/apiserver/pkg/cel/environment"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
- k8sscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
)
@@ -50,36 +47,30 @@ type policyController struct {
once sync.Once
context context.Context
dynamicClient dynamic.Interface
+ informerFactory informers.SharedInformerFactory
restMapper meta.RESTMapper
- policyDefinitionsController generic.Controller[*v1alpha1.ValidatingAdmissionPolicy]
- policyBindingController generic.Controller[*v1alpha1.ValidatingAdmissionPolicyBinding]
+ policyDefinitionsController generic.Controller[*v1beta1.ValidatingAdmissionPolicy]
+ policyBindingController generic.Controller[*v1beta1.ValidatingAdmissionPolicyBinding]
// Provided to the policy's Compile function as an injected dependency to
// assist with compiling its expressions to CEL
+ // pass nil to create the filter compiler on demand
filterCompiler cel.FilterCompiler
matcher Matcher
newValidator
- // The TypeCheck checks the policy's expressions for type errors.
- // Type of params is defined in policy.Spec.ParamsKind
- // Types of object are calculated from policy.Spec.MatchingConstraints
- typeChecker *TypeChecker
-
- // Lock which protects:
- // - cachedPolicies
- // - paramCRDControllers
- // - definitionInfo
- // - bindingInfos
- // - definitionsToBindings
- // All other fields should be assumed constant
+ client kubernetes.Interface
+ // Lock which protects
+ // all fields below it.
+ // All fields above it should be assumed constant.
mutex sync.RWMutex
cachedPolicies []policyData
// controller and metadata
- paramsCRDControllers map[v1alpha1.ParamKind]*paramInfo
+ paramsCRDControllers map[v1beta1.ParamKind]*paramInfo
// Index for each definition namespace/name, contains all binding
// namespace/names known to exist for that definition
@@ -94,32 +85,26 @@ type policyController struct {
// All keys must have at least one dependent binding
// All binding names MUST exist as a key bindingInfos
definitionsToBindings map[namespacedName]sets.Set[namespacedName]
-
- client kubernetes.Interface
-
- authz authorizer.Authorizer
}
-type newValidator func(validationFilter cel.Filter, celMatcher matchconditions.Matcher, auditAnnotationFilter, messageFilter cel.Filter, failurePolicy *v1.FailurePolicyType, authorizer authorizer.Authorizer) Validator
+type newValidator func(validationFilter cel.Filter, celMatcher matchconditions.Matcher, auditAnnotationFilter, messageFilter cel.Filter, failurePolicy *v1.FailurePolicyType) Validator
func newPolicyController(
restMapper meta.RESTMapper,
client kubernetes.Interface,
dynamicClient dynamic.Interface,
- typeChecker *TypeChecker,
+ informerFactory informers.SharedInformerFactory,
filterCompiler cel.FilterCompiler,
matcher Matcher,
- policiesInformer generic.Informer[*v1alpha1.ValidatingAdmissionPolicy],
- bindingsInformer generic.Informer[*v1alpha1.ValidatingAdmissionPolicyBinding],
- authz authorizer.Authorizer,
+ policiesInformer generic.Informer[*v1beta1.ValidatingAdmissionPolicy],
+ bindingsInformer generic.Informer[*v1beta1.ValidatingAdmissionPolicyBinding],
) *policyController {
res := &policyController{}
*res = policyController{
filterCompiler: filterCompiler,
- typeChecker: typeChecker,
definitionInfo: make(map[namespacedName]*definitionInfo),
bindingInfos: make(map[namespacedName]*bindingInfo),
- paramsCRDControllers: make(map[v1alpha1.ParamKind]*paramInfo),
+ paramsCRDControllers: make(map[v1beta1.ParamKind]*paramInfo),
definitionsToBindings: make(map[namespacedName]sets.Set[namespacedName]),
matcher: matcher,
newValidator: NewValidator,
@@ -139,10 +124,10 @@ func newPolicyController(
Name: "cel-policy-bindings",
},
),
- restMapper: restMapper,
- dynamicClient: dynamicClient,
- client: client,
- authz: authz,
+ restMapper: restMapper,
+ dynamicClient: dynamicClient,
+ informerFactory: informerFactory,
+ client: client,
}
return res
}
@@ -175,20 +160,14 @@ func (c *policyController) HasSynced() bool {
return c.policyDefinitionsController.HasSynced() && c.policyBindingController.HasSynced()
}
-func (c *policyController) reconcilePolicyDefinition(namespace, name string, definition *v1alpha1.ValidatingAdmissionPolicy) error {
+func (c *policyController) reconcilePolicyDefinition(namespace, name string, definition *v1beta1.ValidatingAdmissionPolicy) error {
c.mutex.Lock()
defer c.mutex.Unlock()
err := c.reconcilePolicyDefinitionSpec(namespace, name, definition)
- if err != nil {
- return err
- }
- if c.typeChecker != nil {
- err = c.reconcilePolicyStatus(namespace, name, definition)
- }
return err
}
-func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string, definition *v1alpha1.ValidatingAdmissionPolicy) error {
+func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string, definition *v1beta1.ValidatingAdmissionPolicy) error {
c.cachedPolicies = nil // invalidate cachedPolicies
// Namespace for policydefinition is empty.
@@ -207,7 +186,7 @@ func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string,
return nil
}
- var paramSource *v1alpha1.ParamKind
+ var paramSource *v1beta1.ParamKind
if definition != nil {
paramSource = definition.Spec.ParamKind
}
@@ -253,7 +232,6 @@ func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string,
// Skip setting up controller for empty param type
return nil
}
-
// find GVR for params
// Parse param source into a GVK
@@ -280,104 +258,78 @@ func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string,
return info.configurationError
}
- if info, ok := c.paramsCRDControllers[*paramSource]; ok {
- // If a param controller is already active for this paramsource, make
- // sure it is tracking this policy's dependency upon it
- info.dependentDefinitions.Insert(nn)
+ paramInfo := c.ensureParamInfo(paramSource, paramsGVR)
+ paramInfo.dependentDefinitions.Insert(nn)
- } else {
- instanceContext, instanceCancel := context.WithCancel(c.context)
-
- var informer cache.SharedIndexInformer
-
- // Informer Factory is optional
- if c.client != nil {
- // Create temporary informer factory
- // Cannot use the k8s shared informer factory for dynamic params informer.
- // Would leak unnecessary informers when we are done since we would have to
- // call informerFactory.Start() with a longer-lived stopCh than necessary.
- // SharedInformerFactory does not support temporary usage.
- dynamicFactory := informers.NewSharedInformerFactory(c.client, 10*time.Minute)
-
- // Look for a typed informer. If it does not exist
- genericInformer, err := dynamicFactory.ForResource(paramsGVR.Resource)
-
- // Ignore error. We fallback to dynamic informer if there is no
- // typed informer
- if err != nil {
- informer = nil
- } else {
- informer = genericInformer.Informer()
-
- // Set transformer on the informer to workaround inconsistency
- // where typed objects have TypeMeta wiped out but dynamic
- // objects keep kind/apiVersion fields
- informer.SetTransform(func(i interface{}) (interface{}, error) {
- // Ensure param is populated with its GVK for consistency
- // (CRD dynamic informer always returns objects with kind/apiversion,
- // but native types do not include populated TypeMeta.
- if param := i.(runtime.Object); param != nil {
- if param.GetObjectKind().GroupVersionKind().Empty() {
- // https://github.com/kubernetes/client-go/issues/413#issue-324586398
- gvks, _, _ := k8sscheme.Scheme.ObjectKinds(param)
- for _, gvk := range gvks {
- if len(gvk.Kind) == 0 {
- continue
- }
- if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal {
- continue
- }
- param.GetObjectKind().SetGroupVersionKind(gvk)
- break
- }
- }
- }
+ return nil
+}
- return i, nil
- })
- }
- }
+// Ensures that there is an informer started for the given GVK to be used as a
+// param
+func (c *policyController) ensureParamInfo(paramSource *v1beta1.ParamKind, mapping *meta.RESTMapping) *paramInfo {
+ if info, ok := c.paramsCRDControllers[*paramSource]; ok {
+ return info
+ }
- if informer == nil {
- // Dynamic JSON informer fallback.
- // Cannot use shared dynamic informer since it would be impossible
- // to clean CRD informers properly with multiple dependents
- // (cannot start ahead of time, and cannot track dependencies via stopCh)
- informer = dynamicinformer.NewFilteredDynamicInformer(
- c.dynamicClient,
- paramsGVR.Resource,
- corev1.NamespaceAll,
- // Use same interval as is used for k8s typed sharedInformerFactory
- // https://github.com/kubernetes/kubernetes/blob/7e0923899fed622efbc8679cca6b000d43633e38/cmd/kube-apiserver/app/server.go#L430
- 10*time.Minute,
- cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
- nil,
- ).Informer()
- }
+ // We are not watching this param. Start an informer for it.
+ instanceContext, instanceCancel := context.WithCancel(c.context)
- controller := generic.NewController(
- generic.NewInformer[runtime.Object](informer),
- c.reconcileParams,
- generic.ControllerOptions{
- Workers: 1,
- Name: paramSource.String() + "-controller",
- },
- )
+ var informer cache.SharedIndexInformer
- c.paramsCRDControllers[*paramSource] = &paramInfo{
- controller: controller,
- stop: instanceCancel,
- dependentDefinitions: sets.New(nn),
- }
+ // Try to see if our provided informer factory has an informer for this type.
+ // We assume the factory has already been started and starts all types
+ // associated with it.
+ if genericInformer, err := c.informerFactory.ForResource(mapping.Resource); err == nil {
+ informer = genericInformer.Informer()
- go controller.Run(instanceContext)
+ // Ensure the informer is started.
+ // Use policyController's context rather than the instance context.
+ // The policyController context is expected to last until app shutdown.
+ // This is due to the behavior of informerFactory, which would cause the
+ // informer to stop running once the context is cancelled and never
+ // be started again.
+ c.informerFactory.Start(c.context.Done())
+ } else {
+ // Dynamic JSON informer fallback.
+ // Cannot use shared dynamic informer since it would be impossible
+ // to clean CRD informers properly with multiple dependents
+ // (cannot start ahead of time, and cannot track dependencies via stopCh)
+ informer = dynamicinformer.NewFilteredDynamicInformer(
+ c.dynamicClient,
+ mapping.Resource,
+ corev1.NamespaceAll,
+ // Use same interval as is used for k8s typed sharedInformerFactory
+ // https://github.com/kubernetes/kubernetes/blob/7e0923899fed622efbc8679cca6b000d43633e38/cmd/kube-apiserver/app/server.go#L430
+ 10*time.Minute,
+ cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+ nil,
+ ).Informer()
go informer.Run(instanceContext.Done())
}
- return nil
+ controller := generic.NewController(
+ generic.NewInformer[runtime.Object](informer),
+ c.reconcileParams,
+ generic.ControllerOptions{
+ Workers: 1,
+ Name: paramSource.String() + "-controller",
+ },
+ )
+
+ ret := &paramInfo{
+ controller: controller,
+ stop: instanceCancel,
+ scope: mapping.Scope,
+ dependentDefinitions: sets.New[namespacedName](),
+ }
+ c.paramsCRDControllers[*paramSource] = ret
+
+ go controller.Run(instanceContext)
+ return ret
+
}
-func (c *policyController) reconcilePolicyBinding(namespace, name string, binding *v1alpha1.ValidatingAdmissionPolicyBinding) error {
+func (c *policyController) reconcilePolicyBinding(namespace, name string, binding *v1beta1.ValidatingAdmissionPolicyBinding) error {
c.mutex.Lock()
defer c.mutex.Unlock()
@@ -443,30 +395,6 @@ func (c *policyController) reconcilePolicyBinding(namespace, name string, bindin
return nil
}
-func (c *policyController) reconcilePolicyStatus(namespace, name string, definition *v1alpha1.ValidatingAdmissionPolicy) error {
- if definition != nil && definition.Status.ObservedGeneration < definition.Generation {
- st := c.calculatePolicyStatus(definition)
- newDefinition := definition.DeepCopy()
- newDefinition.Status = *st
- _, err := c.client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().UpdateStatus(c.context, newDefinition, metav1.UpdateOptions{})
- if err != nil {
- // ignore error when the controller is not able to
- // mutate the definition, and to avoid infinite requeue.
- utilruntime.HandleError(err)
- }
- }
- return nil
-}
-
-func (c *policyController) calculatePolicyStatus(definition *v1alpha1.ValidatingAdmissionPolicy) *v1alpha1.ValidatingAdmissionPolicyStatus {
- expressionWarnings := c.typeChecker.Check(definition)
- // modifying a deepcopy of the original status, preserving unrelated existing data
- status := definition.Status.DeepCopy()
- status.ObservedGeneration = definition.Generation
- status.TypeChecking = &v1alpha1.TypeChecking{ExpressionWarnings: expressionWarnings}
- return status
-}
-
func (c *policyController) reconcileParams(namespace, name string, params runtime.Object) error {
// Do nothing.
// When we add informational type checking we will need to compile in the
@@ -504,39 +432,49 @@ func (c *policyController) latestPolicyData() []policyData {
}
optionalVars := cel.OptionalVariableDeclarations{HasParams: hasParam, HasAuthorizer: true}
expressionOptionalVars := cel.OptionalVariableDeclarations{HasParams: hasParam, HasAuthorizer: false}
- failurePolicy := convertv1alpha1FailurePolicyTypeTov1FailurePolicyType(definitionInfo.lastReconciledValue.Spec.FailurePolicy)
+ failurePolicy := convertv1beta1FailurePolicyTypeTov1FailurePolicyType(definitionInfo.lastReconciledValue.Spec.FailurePolicy)
var matcher matchconditions.Matcher = nil
matchConditions := definitionInfo.lastReconciledValue.Spec.MatchConditions
+
+ filterCompiler := c.filterCompiler
+ if filterCompiler == nil {
+ compositedCompiler, err := cel.NewCompositedCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion()))
+ if err == nil {
+ filterCompiler = compositedCompiler
+ compositedCompiler.CompileAndStoreVariables(convertv1beta1Variables(definitionInfo.lastReconciledValue.Spec.Variables), optionalVars, environment.StoredExpressions)
+ } else {
+ utilruntime.HandleError(err)
+ }
+ }
if len(matchConditions) > 0 {
matchExpressionAccessors := make([]cel.ExpressionAccessor, len(matchConditions))
for i := range matchConditions {
matchExpressionAccessors[i] = (*matchconditions.MatchCondition)(&matchConditions[i])
}
- matcher = matchconditions.NewMatcher(c.filterCompiler.Compile(matchExpressionAccessors, optionalVars, celconfig.PerCallLimit), c.authz, failurePolicy, "validatingadmissionpolicy", definitionInfo.lastReconciledValue.Name)
+ matcher = matchconditions.NewMatcher(filterCompiler.Compile(matchExpressionAccessors, optionalVars, environment.StoredExpressions), failurePolicy, "policy", "validate", definitionInfo.lastReconciledValue.Name)
}
bindingInfo.validator = c.newValidator(
- c.filterCompiler.Compile(convertv1alpha1Validations(definitionInfo.lastReconciledValue.Spec.Validations), optionalVars, celconfig.PerCallLimit),
+ filterCompiler.Compile(convertv1beta1Validations(definitionInfo.lastReconciledValue.Spec.Validations), optionalVars, environment.StoredExpressions),
matcher,
- c.filterCompiler.Compile(convertv1alpha1AuditAnnotations(definitionInfo.lastReconciledValue.Spec.AuditAnnotations), optionalVars, celconfig.PerCallLimit),
- c.filterCompiler.Compile(convertV1Alpha1MessageExpressions(definitionInfo.lastReconciledValue.Spec.Validations), expressionOptionalVars, celconfig.PerCallLimit),
+ filterCompiler.Compile(convertv1beta1AuditAnnotations(definitionInfo.lastReconciledValue.Spec.AuditAnnotations), optionalVars, environment.StoredExpressions),
+ filterCompiler.Compile(convertv1beta1MessageExpressions(definitionInfo.lastReconciledValue.Spec.Validations), expressionOptionalVars, environment.StoredExpressions),
failurePolicy,
- c.authz,
)
}
bindingInfos = append(bindingInfos, *bindingInfo)
}
- var paramController generic.Controller[runtime.Object]
+ var pInfo paramInfo
if paramKind := definitionInfo.lastReconciledValue.Spec.ParamKind; paramKind != nil {
if info, ok := c.paramsCRDControllers[*paramKind]; ok {
- paramController = info.controller
+ pInfo = *info
}
}
res = append(res, policyData{
- definitionInfo: *definitionInfo,
- paramController: paramController,
- bindings: bindingInfos,
+ definitionInfo: *definitionInfo,
+ paramInfo: pInfo,
+ bindings: bindingInfos,
})
}
@@ -544,21 +482,21 @@ func (c *policyController) latestPolicyData() []policyData {
return res
}
-func convertv1alpha1FailurePolicyTypeTov1FailurePolicyType(policyType *v1alpha1.FailurePolicyType) *v1.FailurePolicyType {
+func convertv1beta1FailurePolicyTypeTov1FailurePolicyType(policyType *v1beta1.FailurePolicyType) *v1.FailurePolicyType {
if policyType == nil {
return nil
}
var v1FailPolicy v1.FailurePolicyType
- if *policyType == v1alpha1.Fail {
+ if *policyType == v1beta1.Fail {
v1FailPolicy = v1.Fail
- } else if *policyType == v1alpha1.Ignore {
+ } else if *policyType == v1beta1.Ignore {
v1FailPolicy = v1.Ignore
}
return &v1FailPolicy
}
-func convertv1alpha1Validations(inputValidations []v1alpha1.Validation) []cel.ExpressionAccessor {
+func convertv1beta1Validations(inputValidations []v1beta1.Validation) []cel.ExpressionAccessor {
celExpressionAccessor := make([]cel.ExpressionAccessor, len(inputValidations))
for i, validation := range inputValidations {
validation := ValidationCondition{
@@ -571,7 +509,7 @@ func convertv1alpha1Validations(inputValidations []v1alpha1.Validation) []cel.Ex
return celExpressionAccessor
}
-func convertV1Alpha1MessageExpressions(inputValidations []v1alpha1.Validation) []cel.ExpressionAccessor {
+func convertv1beta1MessageExpressions(inputValidations []v1beta1.Validation) []cel.ExpressionAccessor {
celExpressionAccessor := make([]cel.ExpressionAccessor, len(inputValidations))
for i, validation := range inputValidations {
if validation.MessageExpression != "" {
@@ -584,7 +522,7 @@ func convertV1Alpha1MessageExpressions(inputValidations []v1alpha1.Validation) [
return celExpressionAccessor
}
-func convertv1alpha1AuditAnnotations(inputValidations []v1alpha1.AuditAnnotation) []cel.ExpressionAccessor {
+func convertv1beta1AuditAnnotations(inputValidations []v1beta1.AuditAnnotation) []cel.ExpressionAccessor {
celExpressionAccessor := make([]cel.ExpressionAccessor, len(inputValidations))
for i, validation := range inputValidations {
validation := AuditAnnotationCondition{
@@ -596,6 +534,14 @@ func convertv1alpha1AuditAnnotations(inputValidations []v1alpha1.AuditAnnotation
return celExpressionAccessor
}
+func convertv1beta1Variables(variables []v1beta1.Variable) []cel.NamedExpressionAccessor {
+ namedExpressions := make([]cel.NamedExpressionAccessor, len(variables))
+ for i, variable := range variables {
+ namedExpressions[i] = &Variable{Name: variable.Name, Expression: variable.Expression}
+ }
+ return namedExpressions
+}
+
func getNamespaceName(namespace, name string) namespacedName {
return namespacedName{
namespace: namespace,
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/interface.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/interface.go
index 0f84152e8b..206fc13783 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/interface.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/interface.go
@@ -21,12 +21,14 @@ import (
celgo "github.com/google/cel-go/cel"
- "k8s.io/api/admissionregistration/v1alpha1"
+ "k8s.io/api/admissionregistration/v1beta1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/admission/plugin/cel"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
)
var _ cel.ExpressionAccessor = &ValidationCondition{}
@@ -60,17 +62,39 @@ func (v *AuditAnnotationCondition) ReturnTypes() []*celgo.Type {
return []*celgo.Type{celgo.StringType, celgo.NullType}
}
+// Variable is a named expression for composition.
+type Variable struct {
+ Name string
+ Expression string
+}
+
+func (v *Variable) GetExpression() string {
+ return v.Expression
+}
+
+func (v *Variable) ReturnTypes() []*celgo.Type {
+ return []*celgo.Type{celgo.AnyType, celgo.DynType}
+}
+
+func (v *Variable) GetName() string {
+ return v.Name
+}
+
// Matcher is used for matching ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding to attributes
type Matcher interface {
admission.InitializationValidator
// DefinitionMatches says whether this policy definition matches the provided admission
// resource request
- DefinitionMatches(a admission.Attributes, o admission.ObjectInterfaces, definition *v1alpha1.ValidatingAdmissionPolicy) (bool, schema.GroupVersionKind, error)
+ DefinitionMatches(a admission.Attributes, o admission.ObjectInterfaces, definition *v1beta1.ValidatingAdmissionPolicy) (bool, schema.GroupVersionResource, schema.GroupVersionKind, error)
// BindingMatches says whether this policy definition matches the provided admission
// resource request
- BindingMatches(a admission.Attributes, o admission.ObjectInterfaces, definition *v1alpha1.ValidatingAdmissionPolicyBinding) (bool, error)
+ BindingMatches(a admission.Attributes, o admission.ObjectInterfaces, definition *v1beta1.ValidatingAdmissionPolicyBinding) (bool, error)
+
+ // GetNamespace retrieves the Namespace resource by the given name. The name may be empty, in which case
+ // GetNamespace must return nil, nil
+ GetNamespace(name string) (*corev1.Namespace, error)
}
// ValidateResult defines the result of a Validator.Validate operation.
@@ -85,5 +109,5 @@ type ValidateResult struct {
type Validator interface {
// Validate is used to take cel evaluations and convert into decisions
// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input.
- Validate(ctx context.Context, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, runtimeCELCostBudget int64) ValidateResult
+ Validate(ctx context.Context, matchedResource schema.GroupVersionResource, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, namespace *corev1.Namespace, runtimeCELCostBudget int64, authz authorizer.Authorizer) ValidateResult
}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matcher.go
index a659a99f14..397f2c2671 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matcher.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matcher.go
@@ -17,7 +17,8 @@ limitations under the License.
package validatingadmissionpolicy
import (
- "k8s.io/api/admissionregistration/v1alpha1"
+ "k8s.io/api/admissionregistration/v1beta1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -28,7 +29,7 @@ import (
var _ matching.MatchCriteria = &matchCriteria{}
type matchCriteria struct {
- constraints *v1alpha1.MatchResources
+ constraints *v1beta1.MatchResources
}
// GetParsedNamespaceSelector returns the converted LabelSelector which implements labels.Selector
@@ -42,7 +43,7 @@ func (m *matchCriteria) GetParsedObjectSelector() (labels.Selector, error) {
}
// GetMatchResources returns the matchConstraints
-func (m *matchCriteria) GetMatchResources() v1alpha1.MatchResources {
+func (m *matchCriteria) GetMatchResources() v1beta1.MatchResources {
return *m.constraints
}
@@ -62,17 +63,21 @@ func (c *matcher) ValidateInitialization() error {
}
// DefinitionMatches returns whether this ValidatingAdmissionPolicy matches the provided admission resource request
-func (c *matcher) DefinitionMatches(a admission.Attributes, o admission.ObjectInterfaces, definition *v1alpha1.ValidatingAdmissionPolicy) (bool, schema.GroupVersionKind, error) {
+func (c *matcher) DefinitionMatches(a admission.Attributes, o admission.ObjectInterfaces, definition *v1beta1.ValidatingAdmissionPolicy) (bool, schema.GroupVersionResource, schema.GroupVersionKind, error) {
criteria := matchCriteria{constraints: definition.Spec.MatchConstraints}
return c.Matcher.Matches(a, o, &criteria)
}
// BindingMatches returns whether this ValidatingAdmissionPolicyBinding matches the provided admission resource request
-func (c *matcher) BindingMatches(a admission.Attributes, o admission.ObjectInterfaces, binding *v1alpha1.ValidatingAdmissionPolicyBinding) (bool, error) {
+func (c *matcher) BindingMatches(a admission.Attributes, o admission.ObjectInterfaces, binding *v1beta1.ValidatingAdmissionPolicyBinding) (bool, error) {
if binding.Spec.MatchResources == nil {
return true, nil
}
criteria := matchCriteria{constraints: binding.Spec.MatchResources}
- isMatch, _, err := c.Matcher.Matches(a, o, &criteria)
+ isMatch, _, _, err := c.Matcher.Matches(a, o, &criteria)
return isMatch, err
}
+
+func (c *matcher) GetNamespace(name string) (*corev1.Namespace, error) {
+ return c.Matcher.GetNamespace(name)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matching/matching.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matching/matching.go
index c4f7e64af2..ebdb61db88 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matching/matching.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/matching/matching.go
@@ -20,7 +20,8 @@ import (
"fmt"
v1 "k8s.io/api/admissionregistration/v1"
- "k8s.io/api/admissionregistration/v1alpha1"
+ "k8s.io/api/admissionregistration/v1beta1"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
"k8s.io/client-go/kubernetes"
@@ -35,7 +36,7 @@ type MatchCriteria interface {
namespace.NamespaceSelectorProvider
object.ObjectSelectorProvider
- GetMatchResources() v1alpha1.MatchResources
+ GetMatchResources() v1beta1.MatchResources
}
// Matcher decides if a request matches against matchCriteria
@@ -44,6 +45,10 @@ type Matcher struct {
objectMatcher *object.Matcher
}
+func (m *Matcher) GetNamespace(name string) (*corev1.Namespace, error) {
+ return m.namespaceMatcher.GetNamespace(name)
+}
+
// NewMatcher initializes the matcher with the required dependencies
func NewMatcher(
namespaceLister listersv1.NamespaceLister,
@@ -66,56 +71,60 @@ func (m *Matcher) ValidateInitialization() error {
return nil
}
-func (m *Matcher) Matches(attr admission.Attributes, o admission.ObjectInterfaces, criteria MatchCriteria) (bool, schema.GroupVersionKind, error) {
+func (m *Matcher) Matches(attr admission.Attributes, o admission.ObjectInterfaces, criteria MatchCriteria) (bool, schema.GroupVersionResource, schema.GroupVersionKind, error) {
matches, matchNsErr := m.namespaceMatcher.MatchNamespaceSelector(criteria, attr)
// Should not return an error here for policies which do not apply to the request, even if err is an unexpected scenario.
if !matches && matchNsErr == nil {
- return false, schema.GroupVersionKind{}, nil
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil
}
matches, matchObjErr := m.objectMatcher.MatchObjectSelector(criteria, attr)
// Should not return an error here for policies which do not apply to the request, even if err is an unexpected scenario.
if !matches && matchObjErr == nil {
- return false, schema.GroupVersionKind{}, nil
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil
}
matchResources := criteria.GetMatchResources()
matchPolicy := matchResources.MatchPolicy
- if isExcluded, _, err := matchesResourceRules(matchResources.ExcludeResourceRules, matchPolicy, attr, o); isExcluded || err != nil {
- return false, schema.GroupVersionKind{}, err
+ if isExcluded, _, _, err := matchesResourceRules(matchResources.ExcludeResourceRules, matchPolicy, attr, o); isExcluded || err != nil {
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, err
}
var (
- isMatch bool
- matchKind schema.GroupVersionKind
- matchErr error
+ isMatch bool
+ matchResource schema.GroupVersionResource
+ matchKind schema.GroupVersionKind
+ matchErr error
)
if len(matchResources.ResourceRules) == 0 {
isMatch = true
matchKind = attr.GetKind()
+ matchResource = attr.GetResource()
} else {
- isMatch, matchKind, matchErr = matchesResourceRules(matchResources.ResourceRules, matchPolicy, attr, o)
+ isMatch, matchResource, matchKind, matchErr = matchesResourceRules(matchResources.ResourceRules, matchPolicy, attr, o)
}
if matchErr != nil {
- return false, schema.GroupVersionKind{}, matchErr
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, matchErr
}
if !isMatch {
- return false, schema.GroupVersionKind{}, nil
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil
}
// now that we know this otherwise applies to the request, if there were selector errors, return them
if matchNsErr != nil {
- return false, schema.GroupVersionKind{}, matchNsErr
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, matchNsErr
}
if matchObjErr != nil {
- return false, schema.GroupVersionKind{}, matchObjErr
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, matchObjErr
}
- return true, matchKind, nil
+ return true, matchResource, matchKind, nil
}
-func matchesResourceRules(namedRules []v1alpha1.NamedRuleWithOperations, matchPolicy *v1alpha1.MatchPolicyType, attr admission.Attributes, o admission.ObjectInterfaces) (bool, schema.GroupVersionKind, error) {
+func matchesResourceRules(namedRules []v1beta1.NamedRuleWithOperations, matchPolicy *v1beta1.MatchPolicyType, attr admission.Attributes, o admission.ObjectInterfaces) (bool, schema.GroupVersionResource, schema.GroupVersionKind, error) {
matchKind := attr.GetKind()
+ matchResource := attr.GetResource()
+
for _, namedRule := range namedRules {
rule := v1.RuleWithOperations(namedRule.RuleWithOperations)
ruleMatcher := rules.Matcher{
@@ -127,22 +136,22 @@ func matchesResourceRules(namedRules []v1alpha1.NamedRuleWithOperations, matchPo
}
// an empty name list always matches
if len(namedRule.ResourceNames) == 0 {
- return true, matchKind, nil
+ return true, matchResource, matchKind, nil
}
// TODO: GetName() can return an empty string if the user is relying on
// the API server to generate the name... figure out what to do for this edge case
name := attr.GetName()
for _, matchedName := range namedRule.ResourceNames {
if name == matchedName {
- return true, matchKind, nil
+ return true, matchResource, matchKind, nil
}
}
}
// if match policy is undefined or exact, don't perform fuzzy matching
// note that defaulting to fuzzy matching is set by the API
- if matchPolicy == nil || *matchPolicy == v1alpha1.Exact {
- return false, schema.GroupVersionKind{}, nil
+ if matchPolicy == nil || *matchPolicy == v1beta1.Exact {
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil
}
attrWithOverride := &attrWithResourceOverride{Attributes: attr}
@@ -164,11 +173,11 @@ func matchesResourceRules(namedRules []v1alpha1.NamedRuleWithOperations, matchPo
}
matchKind = o.GetEquivalentResourceMapper().KindFor(equivalent, attr.GetSubresource())
if matchKind.Empty() {
- return false, schema.GroupVersionKind{}, fmt.Errorf("unable to convert to %v: unknown kind", equivalent)
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, fmt.Errorf("unable to convert to %v: unknown kind", equivalent)
}
// an empty name list always matches
if len(namedRule.ResourceNames) == 0 {
- return true, matchKind, nil
+ return true, equivalent, matchKind, nil
}
// TODO: GetName() can return an empty string if the user is relying on
@@ -176,12 +185,12 @@ func matchesResourceRules(namedRules []v1alpha1.NamedRuleWithOperations, matchPo
name := attr.GetName()
for _, matchedName := range namedRule.ResourceNames {
if name == matchedName {
- return true, matchKind, nil
+ return true, equivalent, matchKind, nil
}
}
}
}
- return false, schema.GroupVersionKind{}, nil
+ return false, schema.GroupVersionResource{}, schema.GroupVersionKind{}, nil
}
type attrWithResourceOverride struct {
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go
index 7b128e3818..6d73e237b0 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go
@@ -21,19 +21,20 @@ import (
"fmt"
"sort"
"strings"
- "sync"
+ "time"
"github.com/google/cel-go/cel"
- "github.com/google/cel-go/common/types/ref"
- "k8s.io/api/admissionregistration/v1alpha1"
+ "k8s.io/api/admissionregistration/v1beta1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/apimachinery/pkg/util/version"
plugincel "k8s.io/apiserver/pkg/admission/plugin/cel"
apiservercel "k8s.io/apiserver/pkg/cel"
"k8s.io/apiserver/pkg/cel/common"
+ "k8s.io/apiserver/pkg/cel/environment"
"k8s.io/apiserver/pkg/cel/library"
"k8s.io/apiserver/pkg/cel/openapi"
"k8s.io/apiserver/pkg/cel/openapi/resolver"
@@ -43,8 +44,17 @@ import (
const maxTypesToCheck = 10
type TypeChecker struct {
- schemaResolver resolver.SchemaResolver
- restMapper meta.RESTMapper
+ SchemaResolver resolver.SchemaResolver
+ RestMapper meta.RESTMapper
+}
+
+// TypeCheckingContext holds information about the policy being type-checked.
+// The struct is opaque to the caller.
+type TypeCheckingContext struct {
+ gvks []schema.GroupVersionKind
+ declTypes []*apiservercel.DeclType
+ paramGVK schema.GroupVersionKind
+ paramDeclType *apiservercel.DeclType
}
type typeOverwrite struct {
@@ -52,127 +62,148 @@ type typeOverwrite struct {
params *apiservercel.DeclType
}
-// typeCheckingResult holds the issues found during type checking, any returned
+// TypeCheckingResult holds the issues found during type checking, any returned
// error, and the gvk that the type checking is performed against.
-type typeCheckingResult struct {
- gvk schema.GroupVersionKind
+type TypeCheckingResult struct {
+ // GVK is the associated GVK
+ GVK schema.GroupVersionKind
+ // Issues contain machine-readable information about the typechecking result.
+ Issues *cel.Issues
+ // Err is the possible error that was encountered during type checking.
+ Err error
+}
+
+// TypeCheckingResults is a collection of TypeCheckingResult
+type TypeCheckingResults []*TypeCheckingResult
- issues *cel.Issues
- err error
+func (rs TypeCheckingResults) String() string {
+ var messages []string
+ for _, r := range rs {
+ message := r.String()
+ if message != "" {
+ messages = append(messages, message)
+ }
+ }
+ return strings.Join(messages, "\n")
+}
+
+// String converts the result to human-readable form as a string.
+func (r *TypeCheckingResult) String() string {
+ if r.Issues == nil && r.Err == nil {
+ return ""
+ }
+ if r.Err != nil {
+ return fmt.Sprintf("%v: type checking error: %v\n", r.GVK, r.Err)
+ }
+ return fmt.Sprintf("%v: %s\n", r.GVK, r.Issues)
}
// Check performs the type check against the given policy, and formats the result
// as []ExpressionWarning that is ready to be set in policy.Status
// The result is nil if type checking returns no warning.
// The policy object is NOT mutated. The caller should update Status accordingly
-func (c *TypeChecker) Check(policy *v1alpha1.ValidatingAdmissionPolicy) []v1alpha1.ExpressionWarning {
- exps := make([]string, 0, len(policy.Spec.Validations))
- // check main validation expressions, located in spec.validations[*]
+func (c *TypeChecker) Check(policy *v1beta1.ValidatingAdmissionPolicy) []v1beta1.ExpressionWarning {
+ ctx := c.CreateContext(policy)
+
+ // warnings to return, note that the capacity is optimistically set to zero
+ var warnings []v1beta1.ExpressionWarning // intentionally not setting capacity
+
+ // check main validation expressions and their message expressions, located in spec.validations[*]
fieldRef := field.NewPath("spec", "validations")
- for _, v := range policy.Spec.Validations {
- exps = append(exps, v.Expression)
- }
- msgs := c.CheckExpressions(exps, policy.Spec.ParamKind != nil, policy)
- var results []v1alpha1.ExpressionWarning // intentionally not setting capacity
- for i, msg := range msgs {
- if msg != "" {
- results = append(results, v1alpha1.ExpressionWarning{
+ for i, v := range policy.Spec.Validations {
+ results := c.CheckExpression(ctx, v.Expression)
+ if len(results) != 0 {
+ warnings = append(warnings, v1beta1.ExpressionWarning{
FieldRef: fieldRef.Index(i).Child("expression").String(),
- Warning: msg,
+ Warning: results.String(),
+ })
+ }
+ // Note that MessageExpression is optional
+ if v.MessageExpression == "" {
+ continue
+ }
+ results = c.CheckExpression(ctx, v.MessageExpression)
+ if len(results) != 0 {
+ warnings = append(warnings, v1beta1.ExpressionWarning{
+ FieldRef: fieldRef.Index(i).Child("messageExpression").String(),
+ Warning: results.String(),
})
}
}
- return results
+
+ return warnings
}
-// CheckExpressions checks a set of compiled CEL programs against the GVKs defined in
-// policy.Spec.MatchConstraints
-// The result is a human-readable form that describe which expressions
-// violate what types at what place. The indexes of the return []string
-// matches these of the input expressions.
-// TODO: It is much more useful to have machine-readable output and let the
-// client format it. That requires an update to the KEP, probably in coming
-// releases.
-func (c *TypeChecker) CheckExpressions(expressions []string, hasParams bool, policy *v1alpha1.ValidatingAdmissionPolicy) []string {
- var allWarnings []string
+// CreateContext resolves all types and their schemas from a policy definition and creates the context.
+func (c *TypeChecker) CreateContext(policy *v1beta1.ValidatingAdmissionPolicy) *TypeCheckingContext {
+ ctx := new(TypeCheckingContext)
allGvks := c.typesToCheck(policy)
gvks := make([]schema.GroupVersionKind, 0, len(allGvks))
- schemas := make([]common.Schema, 0, len(allGvks))
+ declTypes := make([]*apiservercel.DeclType, 0, len(allGvks))
for _, gvk := range allGvks {
- s, err := c.schemaResolver.ResolveSchema(gvk)
+ declType, err := c.declType(gvk)
if err != nil {
// type checking errors MUST NOT alter the behavior of the policy
// even if an error occurs.
if !errors.Is(err, resolver.ErrSchemaNotFound) {
// Anything except ErrSchemaNotFound is an internal error
- klog.ErrorS(err, "internal error: schema resolution failure", "gvk", gvk)
+ klog.V(2).ErrorS(err, "internal error: schema resolution failure", "gvk", gvk)
}
- // skip if an unrecoverable error occurs.
+ // skip for not found or internal error
continue
}
gvks = append(gvks, gvk)
- schemas = append(schemas, &openapi.Schema{Schema: s})
+ declTypes = append(declTypes, declType)
}
+ ctx.gvks = gvks
+ ctx.declTypes = declTypes
- paramsType := c.paramsType(policy)
- paramsDeclType, err := c.declType(paramsType)
+ paramsGVK := c.paramsGVK(policy) // maybe empty, correctly handled
+ paramsDeclType, err := c.declType(paramsGVK)
if err != nil {
if !errors.Is(err, resolver.ErrSchemaNotFound) {
- klog.V(2).ErrorS(err, "cannot resolve schema for params", "gvk", paramsType)
+ klog.V(2).ErrorS(err, "internal error: cannot resolve schema for params", "gvk", paramsGVK)
}
paramsDeclType = nil
}
+ ctx.paramGVK = paramsGVK
+ ctx.paramDeclType = paramsDeclType
+ return ctx
+}
- for _, exp := range expressions {
- var results []typeCheckingResult
- for i, gvk := range gvks {
- s := schemas[i]
- issues, err := c.checkExpression(exp, hasParams, typeOverwrite{
- object: common.SchemaDeclType(s, true),
- params: paramsDeclType,
- })
- // save even if no issues are found, for the sake of formatting.
- results = append(results, typeCheckingResult{
- gvk: gvk,
- issues: issues,
- err: err,
- })
+// CheckExpression type checks a single expression, given the context
+func (c *TypeChecker) CheckExpression(ctx *TypeCheckingContext, expression string) TypeCheckingResults {
+ var results TypeCheckingResults
+ for i, gvk := range ctx.gvks {
+ declType := ctx.declTypes[i]
+ // TODO(jiahuif) hasAuthorizer always true for now, will change after expanding type checking to all fields.
+ issues, err := c.checkExpression(expression, ctx.paramDeclType != nil, true, typeOverwrite{
+ object: declType,
+ params: ctx.paramDeclType,
+ })
+ if issues != nil || err != nil {
+ results = append(results, &TypeCheckingResult{Issues: issues, Err: err, GVK: gvk})
}
- allWarnings = append(allWarnings, c.formatWarning(results))
}
-
- return allWarnings
+ return results
}
-// formatWarning converts the resulting issues and possible error during
-// type checking into a human-readable string
-func (c *TypeChecker) formatWarning(results []typeCheckingResult) string {
- var sb strings.Builder
- for _, result := range results {
- if result.issues == nil && result.err == nil {
- continue
- }
- if result.err != nil {
- sb.WriteString(fmt.Sprintf("%v: type checking error: %v\n", result.gvk, result.err))
- } else {
- sb.WriteString(fmt.Sprintf("%v: %s\n", result.gvk, result.issues))
- }
- }
- return strings.TrimSuffix(sb.String(), "\n")
+func generateUniqueTypeName(kind string) string {
+ return fmt.Sprintf("%s%d", kind, time.Now().Nanosecond())
}
func (c *TypeChecker) declType(gvk schema.GroupVersionKind) (*apiservercel.DeclType, error) {
if gvk.Empty() {
return nil, nil
}
- s, err := c.schemaResolver.ResolveSchema(gvk)
+ s, err := c.SchemaResolver.ResolveSchema(gvk)
if err != nil {
return nil, err
}
- return common.SchemaDeclType(&openapi.Schema{Schema: s}, true), nil
+ return common.SchemaDeclType(&openapi.Schema{Schema: s}, true).MaybeAssignTypeName(generateUniqueTypeName(gvk.Kind)), nil
}
-func (c *TypeChecker) paramsType(policy *v1alpha1.ValidatingAdmissionPolicy) schema.GroupVersionKind {
+func (c *TypeChecker) paramsGVK(policy *v1beta1.ValidatingAdmissionPolicy) schema.GroupVersionKind {
if policy.Spec.ParamKind == nil {
return schema.GroupVersionKind{}
}
@@ -183,8 +214,8 @@ func (c *TypeChecker) paramsType(policy *v1alpha1.ValidatingAdmissionPolicy) sch
return gv.WithKind(policy.Spec.ParamKind.Kind)
}
-func (c *TypeChecker) checkExpression(expression string, hasParams bool, types typeOverwrite) (*cel.Issues, error) {
- env, err := buildEnv(hasParams, types)
+func (c *TypeChecker) checkExpression(expression string, hasParams, hasAuthorizer bool, types typeOverwrite) (*cel.Issues, error) {
+ env, err := buildEnv(hasParams, hasAuthorizer, types)
if err != nil {
return nil, err
}
@@ -202,7 +233,7 @@ func (c *TypeChecker) checkExpression(expression string, hasParams bool, types t
// typesToCheck extracts a list of GVKs that needs type checking from the policy
// the result is sorted in the order of Group, Version, and Kind
-func (c *TypeChecker) typesToCheck(p *v1alpha1.ValidatingAdmissionPolicy) []schema.GroupVersionKind {
+func (c *TypeChecker) typesToCheck(p *v1beta1.ValidatingAdmissionPolicy) []schema.GroupVersionKind {
gvks := sets.New[schema.GroupVersionKind]()
if p.Spec.MatchConstraints == nil || len(p.Spec.MatchConstraints.ResourceRules) == 0 {
return nil
@@ -235,7 +266,7 @@ func (c *TypeChecker) typesToCheck(p *v1alpha1.ValidatingAdmissionPolicy) []sche
Version: version,
Resource: resource,
}
- resolved, err := c.restMapper.KindsFor(gvr)
+ resolved, err := c.RestMapper.KindsFor(gvr)
if err != nil {
continue
}
@@ -263,7 +294,7 @@ func (c *TypeChecker) typesToCheck(p *v1alpha1.ValidatingAdmissionPolicy) []sche
return sortGVKList(gvks.UnsortedList())
}
-func extractGroups(rule *v1alpha1.Rule) []string {
+func extractGroups(rule *v1beta1.Rule) []string {
groups := make([]string, 0, len(rule.APIGroups))
for _, group := range rule.APIGroups {
// give up if wildcard
@@ -275,7 +306,7 @@ func extractGroups(rule *v1alpha1.Rule) []string {
return groups
}
-func extractVersions(rule *v1alpha1.Rule) []string {
+func extractVersions(rule *v1beta1.Rule) []string {
versions := make([]string, 0, len(rule.APIVersions))
for _, version := range rule.APIVersions {
if strings.ContainsAny(version, "*") {
@@ -286,7 +317,7 @@ func extractVersions(rule *v1alpha1.Rule) []string {
return versions
}
-func extractResources(rule *v1alpha1.Rule) []string {
+func extractResources(rule *v1beta1.Rule) []string {
resources := make([]string, 0, len(rule.Resources))
for _, resource := range rule.Resources {
// skip wildcard and subresources
@@ -313,123 +344,64 @@ func sortGVKList(list []schema.GroupVersionKind) []schema.GroupVersionKind {
return list
}
-func buildEnv(hasParams bool, types typeOverwrite) (*cel.Env, error) {
- baseEnv, err := getBaseEnv()
- if err != nil {
- return nil, err
- }
- reg := apiservercel.NewRegistry(baseEnv)
+func buildEnv(hasParams bool, hasAuthorizer bool, types typeOverwrite) (*cel.Env, error) {
+ baseEnv := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())
requestType := plugincel.BuildRequestType()
+ namespaceType := plugincel.BuildNamespaceType()
var varOpts []cel.EnvOption
- var rts []*apiservercel.RuleTypes
+ var declTypes []*apiservercel.DeclType
+
+ // namespace, hand-crafted type
+ declTypes = append(declTypes, namespaceType)
+ varOpts = append(varOpts, createVariableOpts(namespaceType, plugincel.NamespaceVarName)...)
// request, hand-crafted type
- rt, opts, err := createRuleTypesAndOptions(reg, requestType, plugincel.RequestVarName)
- if err != nil {
- return nil, err
- }
- rts = append(rts, rt)
- varOpts = append(varOpts, opts...)
+ declTypes = append(declTypes, requestType)
+ varOpts = append(varOpts, createVariableOpts(requestType, plugincel.RequestVarName)...)
// object and oldObject, same type, type(s) resolved from constraints
- rt, opts, err = createRuleTypesAndOptions(reg, types.object, plugincel.ObjectVarName, plugincel.OldObjectVarName)
- if err != nil {
- return nil, err
- }
- rts = append(rts, rt)
- varOpts = append(varOpts, opts...)
+ declTypes = append(declTypes, types.object)
+ varOpts = append(varOpts, createVariableOpts(types.object, plugincel.ObjectVarName, plugincel.OldObjectVarName)...)
// params, defined by ParamKind
- if hasParams {
- rt, opts, err := createRuleTypesAndOptions(reg, types.params, plugincel.ParamsVarName)
- if err != nil {
- return nil, err
- }
- rts = append(rts, rt)
- varOpts = append(varOpts, opts...)
+ if hasParams && types.params != nil {
+ declTypes = append(declTypes, types.params)
+ varOpts = append(varOpts, createVariableOpts(types.params, plugincel.ParamsVarName)...)
}
- opts, err = ruleTypesOpts(rts, baseEnv.TypeProvider())
- if err != nil {
- return nil, err
+ // authorizer, implicitly available to all expressions of a policy
+ if hasAuthorizer {
+ // we only need its structure but not the variable itself
+ varOpts = append(varOpts, cel.Variable("authorizer", library.AuthorizerType))
}
- opts = append(opts, varOpts...) // add variables after ruleTypes.
- env, err := baseEnv.Extend(opts...)
+
+ env, err := baseEnv.Extend(
+ environment.VersionedOptions{
+ // Feature epoch was actually 1.26, but we artificially set it to 1.0 because these
+ // options should always be present.
+ IntroducedVersion: version.MajorMinor(1, 0),
+ EnvOptions: varOpts,
+ DeclTypes: declTypes,
+ },
+ )
if err != nil {
return nil, err
}
- return env, nil
+ return env.Env(environment.StoredExpressions)
}
-// createRuleTypeAndOptions creates the cel RuleTypes and a slice of EnvOption
+// createVariableOpts creates a slice of EnvOption
// that can be used for creating a CEL env containing variables of declType.
// declType can be nil, in which case the variables will be of DynType.
-func createRuleTypesAndOptions(registry *apiservercel.Registry, declType *apiservercel.DeclType, variables ...string) (*apiservercel.RuleTypes, []cel.EnvOption, error) {
+func createVariableOpts(declType *apiservercel.DeclType, variables ...string) []cel.EnvOption {
opts := make([]cel.EnvOption, 0, len(variables))
- // untyped, use DynType
- if declType == nil {
- for _, v := range variables {
- opts = append(opts, cel.Variable(v, cel.DynType))
- }
- return nil, opts, nil
- }
- // create a RuleType for the given type
- rt, err := apiservercel.NewRuleTypes(declType.TypeName(), declType, registry)
- if err != nil {
- return nil, nil, err
- }
- if rt == nil {
- return nil, nil, nil
+ t := cel.DynType
+ if declType != nil {
+ t = declType.CelType()
}
for _, v := range variables {
- opts = append(opts, cel.Variable(v, declType.CelType()))
- }
- return rt, opts, nil
-}
-
-func ruleTypesOpts(ruleTypes []*apiservercel.RuleTypes, underlyingTypeProvider ref.TypeProvider) ([]cel.EnvOption, error) {
- var providers []ref.TypeProvider // may be unused, too small to matter
- var adapters []ref.TypeAdapter
- for _, rt := range ruleTypes {
- if rt != nil {
- withTP, err := rt.WithTypeProvider(underlyingTypeProvider)
- if err != nil {
- return nil, err
- }
- providers = append(providers, withTP)
- adapters = append(adapters, withTP)
- }
- }
- var tp ref.TypeProvider
- var ta ref.TypeAdapter
- switch len(providers) {
- case 0:
- return nil, nil
- case 1:
- tp = providers[0]
- ta = adapters[0]
- default:
- tp = &apiservercel.CompositedTypeProvider{Providers: providers}
- ta = &apiservercel.CompositedTypeAdapter{Adapters: adapters}
+ opts = append(opts, cel.Variable(v, t))
}
- return []cel.EnvOption{cel.CustomTypeProvider(tp), cel.CustomTypeAdapter(ta)}, nil
+ return opts
}
-
-func getBaseEnv() (*cel.Env, error) {
- typeCheckingBaseEnvInit.Do(func() {
- var opts []cel.EnvOption
- opts = append(opts, cel.HomogeneousAggregateLiterals())
- // Validate function declarations once during base env initialization,
- // so they don't need to be evaluated each time a CEL rule is compiled.
- // This is a relatively expensive operation.
- opts = append(opts, cel.EagerlyValidateDeclarations(true), cel.DefaultUTCTimeZone(true))
- opts = append(opts, library.ExtensionLibs...)
- typeCheckingBaseEnv, typeCheckingBaseEnvError = cel.NewEnv(opts...)
- })
- return typeCheckingBaseEnv, typeCheckingBaseEnvError
-}
-
-var typeCheckingBaseEnv *cel.Env
-var typeCheckingBaseEnvError error
-var typeCheckingBaseEnvInit sync.Once
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/validator.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/validator.go
index 448750c919..9630a49747 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/validator.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/validator.go
@@ -24,8 +24,10 @@ import (
celtypes "github.com/google/cel-go/common/types"
v1 "k8s.io/api/admissionregistration/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/admission/plugin/cel"
"k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions"
@@ -42,17 +44,15 @@ type validator struct {
auditAnnotationFilter cel.Filter
messageFilter cel.Filter
failPolicy *v1.FailurePolicyType
- authorizer authorizer.Authorizer
}
-func NewValidator(validationFilter cel.Filter, celMatcher matchconditions.Matcher, auditAnnotationFilter, messageFilter cel.Filter, failPolicy *v1.FailurePolicyType, authorizer authorizer.Authorizer) Validator {
+func NewValidator(validationFilter cel.Filter, celMatcher matchconditions.Matcher, auditAnnotationFilter, messageFilter cel.Filter, failPolicy *v1.FailurePolicyType) Validator {
return &validator{
celMatcher: celMatcher,
validationFilter: validationFilter,
auditAnnotationFilter: auditAnnotationFilter,
messageFilter: messageFilter,
failPolicy: failPolicy,
- authorizer: authorizer,
}
}
@@ -72,7 +72,8 @@ func auditAnnotationEvaluationForError(f v1.FailurePolicyType) PolicyAuditAnnota
// Validate takes a list of Evaluation and a failure policy and converts them into actionable PolicyDecisions
// runtimeCELCostBudget was added for testing purpose only. Callers should always use const RuntimeCELCostBudget from k8s.io/apiserver/pkg/apis/cel/config.go as input.
-func (v *validator) Validate(ctx context.Context, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, runtimeCELCostBudget int64) ValidateResult {
+
+func (v *validator) Validate(ctx context.Context, matchedResource schema.GroupVersionResource, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, namespace *corev1.Namespace, runtimeCELCostBudget int64, authz authorizer.Authorizer) ValidateResult {
var f v1.FailurePolicyType
if v.failPolicy == nil {
f = v1.Fail
@@ -81,7 +82,7 @@ func (v *validator) Validate(ctx context.Context, versionedAttr *admission.Versi
}
if v.celMatcher != nil {
- matchResults := v.celMatcher.Match(ctx, versionedAttr, versionedParams)
+ matchResults := v.celMatcher.Match(ctx, versionedAttr, versionedParams, authz)
if matchResults.Error != nil {
return ValidateResult{
Decisions: []PolicyDecision{
@@ -100,10 +101,12 @@ func (v *validator) Validate(ctx context.Context, versionedAttr *admission.Versi
}
}
- optionalVars := cel.OptionalVariableBindings{VersionedParams: versionedParams, Authorizer: v.authorizer}
+ optionalVars := cel.OptionalVariableBindings{VersionedParams: versionedParams, Authorizer: authz}
expressionOptionalVars := cel.OptionalVariableBindings{VersionedParams: versionedParams}
- admissionRequest := cel.CreateAdmissionRequest(versionedAttr.Attributes)
- evalResults, remainingBudget, err := v.validationFilter.ForInput(ctx, versionedAttr, admissionRequest, optionalVars, runtimeCELCostBudget)
+ admissionRequest := cel.CreateAdmissionRequest(versionedAttr.Attributes, metav1.GroupVersionResource(matchedResource), metav1.GroupVersionKind(versionedAttr.VersionedKind))
+ // Decide which fields are exposed
+ ns := cel.CreateNamespaceObject(namespace)
+ evalResults, remainingBudget, err := v.validationFilter.ForInput(ctx, versionedAttr, admissionRequest, optionalVars, ns, runtimeCELCostBudget)
if err != nil {
return ValidateResult{
Decisions: []PolicyDecision{
@@ -116,7 +119,7 @@ func (v *validator) Validate(ctx context.Context, versionedAttr *admission.Versi
}
}
decisions := make([]PolicyDecision, len(evalResults))
- messageResults, _, err := v.messageFilter.ForInput(ctx, versionedAttr, admissionRequest, expressionOptionalVars, remainingBudget)
+ messageResults, _, err := v.messageFilter.ForInput(ctx, versionedAttr, admissionRequest, expressionOptionalVars, ns, remainingBudget)
for i, evalResult := range evalResults {
var decision = &decisions[i]
// TODO: move this to generics
@@ -193,7 +196,7 @@ func (v *validator) Validate(ctx context.Context, versionedAttr *admission.Versi
}
options := cel.OptionalVariableBindings{VersionedParams: versionedParams}
- auditAnnotationEvalResults, _, err := v.auditAnnotationFilter.ForInput(ctx, versionedAttr, cel.CreateAdmissionRequest(versionedAttr.Attributes), options, runtimeCELCostBudget)
+ auditAnnotationEvalResults, _, err := v.auditAnnotationFilter.ForInput(ctx, versionedAttr, admissionRequest, options, namespace, runtimeCELCostBudget)
if err != nil {
return ValidateResult{
Decisions: []PolicyDecision{
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go
index 102597cbcc..e60d245a62 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/accessors.go
@@ -26,8 +26,7 @@ import (
"k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions"
"k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace"
"k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object"
- celconfig "k8s.io/apiserver/pkg/apis/cel"
- "k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/apiserver/pkg/cel/environment"
webhookutil "k8s.io/apiserver/pkg/util/webhook"
"k8s.io/client-go/rest"
)
@@ -49,7 +48,7 @@ type WebhookAccessor interface {
GetRESTClient(clientManager *webhookutil.ClientManager) (*rest.RESTClient, error)
// GetCompiledMatcher gets the compiled matcher object
- GetCompiledMatcher(compiler cel.FilterCompiler, authorizer authorizer.Authorizer) matchconditions.Matcher
+ GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher
// GetName gets the webhook Name field. Note that the name is scoped to the webhook
// configuration and does not provide a globally unique identity, if a unique identity is
@@ -81,6 +80,9 @@ type WebhookAccessor interface {
GetMutatingWebhook() (*v1.MutatingWebhook, bool)
// GetValidatingWebhook if the accessor contains a ValidatingWebhook, returns it and true, else returns false.
GetValidatingWebhook() (*v1.ValidatingWebhook, bool)
+
+ // GetType returns the type of the accessor (validate or admit)
+ GetType() string
}
// NewMutatingWebhookAccessor creates an accessor for a MutatingWebhook.
@@ -124,8 +126,11 @@ func (m *mutatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.Clien
return m.client, m.clientErr
}
-// TODO: graduation to beta: resolve the fact that we rebuild ALL items whenever ANY config changes in NewMutatingWebhookConfigurationManager and NewValidatingWebhookConfigurationManager ... now that we're doing CEL compilation, we probably want to avoid that
-func (m *mutatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler, authorizer authorizer.Authorizer) matchconditions.Matcher {
+func (m *mutatingWebhookAccessor) GetType() string {
+ return "admit"
+}
+
+func (m *mutatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher {
m.compileMatcher.Do(func() {
expressions := make([]cel.ExpressionAccessor, len(m.MutatingWebhook.MatchConditions))
for i, matchCondition := range m.MutatingWebhook.MatchConditions {
@@ -140,8 +145,8 @@ func (m *mutatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler
HasParams: false,
HasAuthorizer: true,
},
- celconfig.PerCallLimit,
- ), authorizer, m.FailurePolicy, "validating", m.Name)
+ environment.StoredExpressions,
+ ), m.FailurePolicy, "webhook", "admit", m.Name)
})
return m.compiledMatcher
}
@@ -253,7 +258,7 @@ func (v *validatingWebhookAccessor) GetRESTClient(clientManager *webhookutil.Cli
return v.client, v.clientErr
}
-func (v *validatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler, authorizer authorizer.Authorizer) matchconditions.Matcher {
+func (v *validatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompiler) matchconditions.Matcher {
v.compileMatcher.Do(func() {
expressions := make([]cel.ExpressionAccessor, len(v.ValidatingWebhook.MatchConditions))
for i, matchCondition := range v.ValidatingWebhook.MatchConditions {
@@ -268,8 +273,8 @@ func (v *validatingWebhookAccessor) GetCompiledMatcher(compiler cel.FilterCompil
HasParams: false,
HasAuthorizer: true,
},
- celconfig.PerCallLimit,
- ), authorizer, v.FailurePolicy, "validating", v.Name)
+ environment.StoredExpressions,
+ ), v.FailurePolicy, "webhook", "validating", v.Name)
})
return v.compiledMatcher
}
@@ -288,6 +293,10 @@ func (v *validatingWebhookAccessor) GetParsedObjectSelector() (labels.Selector,
return v.objectSelector, v.objectSelectorErr
}
+func (m *validatingWebhookAccessor) GetType() string {
+ return "validate"
+}
+
func (v *validatingWebhookAccessor) GetName() string {
return v.Name
}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go
index a582898311..6a513f1c11 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/generic/webhook.go
@@ -21,6 +21,9 @@ import (
"fmt"
"io"
+ admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
+ "k8s.io/klog/v2"
+
admissionv1 "k8s.io/api/admission/v1"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
v1 "k8s.io/api/admissionregistration/v1"
@@ -35,10 +38,10 @@ import (
"k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/object"
"k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/rules"
"k8s.io/apiserver/pkg/authorization/authorizer"
+ "k8s.io/apiserver/pkg/cel/environment"
webhookutil "k8s.io/apiserver/pkg/util/webhook"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
- "k8s.io/klog/v2"
)
// Webhook is an abstract admission plugin with all the infrastructure to define Admit or Validate on-top.
@@ -97,7 +100,7 @@ func NewWebhook(handler *admission.Handler, configFile io.Reader, sourceFactory
namespaceMatcher: &namespace.Matcher{},
objectMatcher: &object.Matcher{},
dispatcher: dispatcherFactory(&cm),
- filterCompiler: cel.NewFilterCompiler(),
+ filterCompiler: cel.NewFilterCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())),
}, nil
}
@@ -216,7 +219,6 @@ func (a *Webhook) ShouldCallHook(ctx context.Context, h webhook.WebhookAccessor,
if matchObjErr != nil {
return nil, matchObjErr
}
-
matchConditions := h.GetMatchConditions()
if len(matchConditions) > 0 {
versionedAttr, err := v.VersionedAttribute(invocation.Kind)
@@ -224,13 +226,14 @@ func (a *Webhook) ShouldCallHook(ctx context.Context, h webhook.WebhookAccessor,
return nil, apierrors.NewInternalError(err)
}
- matcher := h.GetCompiledMatcher(a.filterCompiler, a.authorizer)
- matchResult := matcher.Match(ctx, versionedAttr, nil)
+ matcher := h.GetCompiledMatcher(a.filterCompiler)
+ matchResult := matcher.Match(ctx, versionedAttr, nil, a.authorizer)
if matchResult.Error != nil {
klog.Warningf("Failed evaluating match conditions, failing closed %v: %v", h.GetName(), matchResult.Error)
return nil, apierrors.NewForbidden(attr.GetResource().GroupResource(), attr.GetName(), matchResult.Error)
} else if !matchResult.Matches {
+ admissionmetrics.Metrics.ObserveMatchConditionExclusion(ctx, h.GetName(), "webhook", h.GetType(), string(attr.GetOperation()))
// if no match, always skip webhook
return nil, nil
}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/interface.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/interface.go
index 09468655bd..094a019d1f 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/interface.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/interface.go
@@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
+ "k8s.io/apiserver/pkg/authorization/authorizer"
)
type MatchResult struct {
@@ -32,5 +33,5 @@ type MatchResult struct {
// Matcher contains logic for converting Evaluations to bool of matches or does not match
type Matcher interface {
// Match is used to take cel evaluations and convert into decisions
- Match(ctx context.Context, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object) MatchResult
+ Match(ctx context.Context, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, authz authorizer.Authorizer) MatchResult
}
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go
index 09a500dd39..21dd28f6c2 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions/matcher.go
@@ -20,11 +20,13 @@ import (
"context"
"errors"
"fmt"
+ "time"
"github.com/google/cel-go/cel"
celtypes "github.com/google/cel-go/common/types"
v1 "k8s.io/api/admissionregistration/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/admission"
@@ -53,13 +55,13 @@ var _ Matcher = &matcher{}
// matcher evaluates compiled cel expressions and determines if they match the given request or not
type matcher struct {
filter celplugin.Filter
- authorizer authorizer.Authorizer
failPolicy v1.FailurePolicyType
matcherType string
+ matcherKind string
objectName string
}
-func NewMatcher(filter celplugin.Filter, authorizer authorizer.Authorizer, failPolicy *v1.FailurePolicyType, matcherType, objectName string) Matcher {
+func NewMatcher(filter celplugin.Filter, failPolicy *v1.FailurePolicyType, matcherKind, matcherType, objectName string) Matcher {
var f v1.FailurePolicyType
if failPolicy == nil {
f = v1.Fail
@@ -68,20 +70,22 @@ func NewMatcher(filter celplugin.Filter, authorizer authorizer.Authorizer, failP
}
return &matcher{
filter: filter,
- authorizer: authorizer,
failPolicy: f,
+ matcherKind: matcherKind,
matcherType: matcherType,
objectName: objectName,
}
}
-func (m *matcher) Match(ctx context.Context, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object) MatchResult {
- evalResults, _, err := m.filter.ForInput(ctx, versionedAttr, celplugin.CreateAdmissionRequest(versionedAttr.Attributes), celplugin.OptionalVariableBindings{
+func (m *matcher) Match(ctx context.Context, versionedAttr *admission.VersionedAttributes, versionedParams runtime.Object, authz authorizer.Authorizer) MatchResult {
+ t := time.Now()
+ evalResults, _, err := m.filter.ForInput(ctx, versionedAttr, celplugin.CreateAdmissionRequest(versionedAttr.Attributes, metav1.GroupVersionResource(versionedAttr.GetResource()), metav1.GroupVersionKind(versionedAttr.VersionedKind)), celplugin.OptionalVariableBindings{
VersionedParams: versionedParams,
- Authorizer: m.authorizer,
- }, celconfig.RuntimeCELCostBudgetMatchConditions)
+ Authorizer: authz,
+ }, nil, celconfig.RuntimeCELCostBudgetMatchConditions)
if err != nil {
+ admissionmetrics.Metrics.ObserveMatchConditionEvaluationTime(ctx, time.Since(t), m.objectName, m.matcherKind, m.matcherType, string(versionedAttr.GetOperation()))
// filter returning error is unexpected and not an evaluation error so not incrementing metric here
if m.failPolicy == v1.Fail {
return MatchResult{
@@ -106,10 +110,10 @@ func (m *matcher) Match(ctx context.Context, versionedAttr *admission.VersionedA
}
if evalResult.Error != nil {
errorList = append(errorList, evalResult.Error)
- //TODO: what's the best way to handle this metric since its reused by VAP for match conditions
- admissionmetrics.Metrics.ObserveMatchConditionEvalError(ctx, m.objectName, m.matcherType)
+ admissionmetrics.Metrics.ObserveMatchConditionEvalError(ctx, m.objectName, m.matcherKind, m.matcherType, string(versionedAttr.GetOperation()))
}
if evalResult.EvalResult == celtypes.False {
+ admissionmetrics.Metrics.ObserveMatchConditionEvaluationTime(ctx, time.Since(t), m.objectName, m.matcherKind, m.matcherType, string(versionedAttr.GetOperation()))
// If any condition false, skip calling webhook always
return MatchResult{
Matches: false,
@@ -118,6 +122,7 @@ func (m *matcher) Match(ctx context.Context, versionedAttr *admission.VersionedA
}
}
if len(errorList) > 0 {
+ admissionmetrics.Metrics.ObserveMatchConditionEvaluationTime(ctx, time.Since(t), m.objectName, m.matcherKind, m.matcherType, string(versionedAttr.GetOperation()))
// If mix of true and eval errors then resort to fail policy
if m.failPolicy == v1.Fail {
// mix of true and errors with fail policy fail should fail request without calling webhook
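
A minimal sketch of how a caller wires the reworked match-condition Matcher, assuming a compiled celplugin.Filter and an authorizer.Authorizer are already in hand; the helper names and the "example-webhook" string are illustrative, and only the NewMatcher and Match signatures are taken from the hunks above.

package example

import (
	"context"

	v1 "k8s.io/api/admissionregistration/v1"
	"k8s.io/apiserver/pkg/admission"
	celplugin "k8s.io/apiserver/pkg/admission/plugin/cel"
	"k8s.io/apiserver/pkg/admission/plugin/webhook/matchconditions"
	"k8s.io/apiserver/pkg/authorization/authorizer"
)

// buildMatcher uses the new constructor: the authorizer is no longer stored on
// the matcher and must be supplied on every Match call instead.
func buildMatcher(filter celplugin.Filter, failPolicy *v1.FailurePolicyType) matchconditions.Matcher {
	return matchconditions.NewMatcher(filter, failPolicy, "webhook", "admit", "example-webhook")
}

// evaluate passes the authorizer per call, mirroring ShouldCallHook above.
func evaluate(ctx context.Context, m matchconditions.Matcher, attr *admission.VersionedAttributes, authz authorizer.Authorizer) bool {
	result := m.Match(ctx, attr, nil /* no versioned params */, authz)
	if result.Error != nil {
		// The caller decides whether to fail open or closed based on the
		// webhook failure policy; this sketch simply reports no match.
		return false
	}
	return result.Matches
}
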
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go
index 437e53dc93..af237ae0c0 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating/dispatcher.go
@@ -20,7 +20,6 @@ package mutating
import (
"context"
- "errors"
"fmt"
"time"
@@ -169,12 +168,13 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attrib
if err != nil {
switch err := err.(type) {
case *webhookutil.ErrCallingWebhook:
+ if ctx.Err() == context.Canceled {
+ klog.Warningf("Context Canceled when calling webhook %v", hook.Name)
+ return err
+ }
if !ignoreClientCallFailures {
rejected = true
- // Ignore context cancelled from webhook metrics
- if !errors.Is(err.Reason, context.Canceled) {
- admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, int(err.Status.ErrStatus.Code))
- }
+ admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "admit", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, int(err.Status.ErrStatus.Code))
}
admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "admit", int(err.Status.ErrStatus.Code))
case *webhookutil.ErrWebhookRejection:
@@ -203,14 +203,10 @@ func (a *mutatingDispatcher) Dispatch(ctx context.Context, attr admission.Attrib
if callErr, ok := err.(*webhookutil.ErrCallingWebhook); ok {
if ignoreClientCallFailures {
- // Ignore context cancelled from webhook metrics
- if errors.Is(callErr.Reason, context.Canceled) {
- klog.Warningf("Context canceled when calling webhook %v", hook.Name)
- } else {
- klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr)
- admissionmetrics.Metrics.ObserveWebhookFailOpen(ctx, hook.Name, "admit")
- annotator.addFailedOpenAnnotation()
- }
+ klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr)
+ admissionmetrics.Metrics.ObserveWebhookFailOpen(ctx, hook.Name, "admit")
+ annotator.addFailedOpenAnnotation()
+
utilruntime.HandleError(callErr)
select {
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/matcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/matcher.go
index 459e3f5df6..6427bc6748 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/matcher.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/predicates/namespace/matcher.go
@@ -20,6 +20,8 @@ import (
"context"
"fmt"
+ v1 "k8s.io/api/core/v1"
+
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -42,6 +44,10 @@ type Matcher struct {
Client clientset.Interface
}
+func (m *Matcher) GetNamespace(name string) (*v1.Namespace, error) {
+ return m.NamespaceLister.Get(name)
+}
+
// Validate checks if the Matcher has a NamespaceLister and Client.
func (m *Matcher) Validate() error {
var errs []error
diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go
index 1a633d2417..af435649bd 100644
--- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go
+++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/validating/dispatcher.go
@@ -18,7 +18,6 @@ package validating
import (
"context"
- "errors"
"fmt"
"sync"
"time"
@@ -174,12 +173,13 @@ func (d *validatingDispatcher) Dispatch(ctx context.Context, attr admission.Attr
if err != nil {
switch err := err.(type) {
case *webhookutil.ErrCallingWebhook:
+ if ctx.Err() == context.Canceled {
+ klog.Warningf("Context Canceled when calling webhook %v", hook.Name)
+ return
+ }
if !ignoreClientCallFailures {
rejected = true
- // Ignore context cancelled from webhook metrics
- if !errors.Is(err.Reason, context.Canceled) {
- admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, int(err.Status.ErrStatus.Code))
- }
+ admissionmetrics.Metrics.ObserveWebhookRejection(ctx, hook.Name, "validating", string(versionedAttr.Attributes.GetOperation()), admissionmetrics.WebhookRejectionCallingWebhookError, int(err.Status.ErrStatus.Code))
}
admissionmetrics.Metrics.ObserveWebhook(ctx, hook.Name, time.Since(t), rejected, versionedAttr.Attributes, "validating", int(err.Status.ErrStatus.Code))
case *webhookutil.ErrWebhookRejection:
@@ -198,17 +198,12 @@ func (d *validatingDispatcher) Dispatch(ctx context.Context, attr admission.Attr
if callErr, ok := err.(*webhookutil.ErrCallingWebhook); ok {
if ignoreClientCallFailures {
- // Ignore context cancelled from webhook metrics
- if errors.Is(callErr.Reason, context.Canceled) {
- klog.Warningf("Context canceled when calling webhook %v", hook.Name)
- } else {
- klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr)
- admissionmetrics.Metrics.ObserveWebhookFailOpen(ctx, hook.Name, "validating")
- key := fmt.Sprintf("%sround_0_index_%d", ValidatingAuditAnnotationFailedOpenKeyPrefix, idx)
- value := hook.Name
- if err := versionedAttr.Attributes.AddAnnotation(key, value); err != nil {
- klog.Warningf("Failed to set admission audit annotation %s to %s for validating webhook %s: %v", key, value, hook.Name, err)
- }
+ klog.Warningf("Failed calling webhook, failing open %v: %v", hook.Name, callErr)
+ admissionmetrics.Metrics.ObserveWebhookFailOpen(ctx, hook.Name, "validating")
+ key := fmt.Sprintf("%sround_0_index_%d", ValidatingAuditAnnotationFailedOpenKeyPrefix, idx)
+ value := hook.Name
+ if err := versionedAttr.Attributes.AddAnnotation(key, value); err != nil {
+ klog.Warningf("Failed to set admission audit annotation %s to %s for validating webhook %s: %v", key, value, hook.Name, err)
}
utilruntime.HandleError(callErr)
return
diff --git a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go
index 3859a54d1f..b037371e3a 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go
@@ -89,6 +89,10 @@ var (
flowcontrol.PriorityLevelConfigurationNameExempt,
flowcontrol.PriorityLevelConfigurationSpec{
Type: flowcontrol.PriorityLevelEnablementExempt,
+ Exempt: &flowcontrol.ExemptPriorityLevelConfiguration{
+ NominalConcurrencyShares: pointer.Int32(0),
+ LendablePercent: pointer.Int32(0),
+ },
},
)
MandatoryPriorityLevelConfigurationCatchAll = newPriorityLevelConfiguration(
diff --git a/vendor/k8s.io/apiserver/pkg/audit/context.go b/vendor/k8s.io/apiserver/pkg/audit/context.go
index 95a18bcd5c..9648587378 100644
--- a/vendor/k8s.io/apiserver/pkg/audit/context.go
+++ b/vendor/k8s.io/apiserver/pkg/audit/context.go
@@ -39,21 +39,18 @@ type AuditContext struct {
RequestAuditConfig RequestAuditConfig
// Event is the audit Event object that is being captured to be written in
- // the API audit log. It is set to nil when the request is not being audited.
- Event *auditinternal.Event
+ // the API audit log.
+ Event auditinternal.Event
- // annotations holds audit annotations that are recorded before the event has been initialized.
- // This is represented as a slice rather than a map to preserve order.
- annotations []annotation
- // annotationMutex guards annotations AND event.Annotations
+ // annotationMutex guards event.Annotations
annotationMutex sync.Mutex
-
- // auditID is the Audit ID associated with this request.
- auditID types.UID
}
-type annotation struct {
- key, value string
+// Enabled checks whether auditing is enabled for this audit context.
+func (ac *AuditContext) Enabled() bool {
+ // Note: An unset Level should be considered Enabled, so that request data (e.g. annotations)
+ // can still be captured before the audit policy is evaluated.
+ return ac != nil && ac.RequestAuditConfig.Level != auditinternal.LevelNone
}
// AddAuditAnnotation sets the audit annotation for the given key, value pair.
@@ -65,8 +62,7 @@ type annotation struct {
// prefer AddAuditAnnotation over LogAnnotation to avoid dropping annotations.
func AddAuditAnnotation(ctx context.Context, key, value string) {
ac := AuditContextFrom(ctx)
- if ac == nil {
- // auditing is not enabled
+ if !ac.Enabled() {
return
}
@@ -81,8 +77,7 @@ func AddAuditAnnotation(ctx context.Context, key, value string) {
// keysAndValues are the key-value pairs to add, and must have an even number of items.
func AddAuditAnnotations(ctx context.Context, keysAndValues ...string) {
ac := AuditContextFrom(ctx)
- if ac == nil {
- // auditing is not enabled
+ if !ac.Enabled() {
return
}
@@ -101,8 +96,7 @@ func AddAuditAnnotations(ctx context.Context, keysAndValues ...string) {
// restrictions on when this can be called.
func AddAuditAnnotationsMap(ctx context.Context, annotations map[string]string) {
ac := AuditContextFrom(ctx)
- if ac == nil {
- // auditing is not enabled
+ if !ac.Enabled() {
return
}
@@ -114,38 +108,10 @@ func AddAuditAnnotationsMap(ctx context.Context, annotations map[string]string)
}
}
-// addAuditAnnotationLocked is the shared code for recording an audit annotation. This method should
-// only be called while the auditAnnotationsMutex is locked.
+// addAuditAnnotationLocked records the audit annotation on the event.
func addAuditAnnotationLocked(ac *AuditContext, key, value string) {
- if ac.Event != nil {
- logAnnotation(ac.Event, key, value)
- } else {
- ac.annotations = append(ac.annotations, annotation{key: key, value: value})
- }
-}
-
-// This is private to prevent reads/write to the slice from outside of this package.
-// The audit event should be directly read to get access to the annotations.
-func addAuditAnnotationsFrom(ctx context.Context, ev *auditinternal.Event) {
- ac := AuditContextFrom(ctx)
- if ac == nil {
- // auditing is not enabled
- return
- }
-
- ac.annotationMutex.Lock()
- defer ac.annotationMutex.Unlock()
+ ae := &ac.Event
- for _, kv := range ac.annotations {
- logAnnotation(ev, kv.key, kv.value)
- }
-}
-
-// LogAnnotation fills in the Annotations according to the key value pair.
-func logAnnotation(ae *auditinternal.Event, key, value string) {
- if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) {
- return
- }
if ae.Annotations == nil {
ae.Annotations = make(map[string]string)
}
@@ -167,8 +133,8 @@ func WithAuditContext(parent context.Context) context.Context {
// AuditEventFrom returns the audit event struct on the ctx
func AuditEventFrom(ctx context.Context) *auditinternal.Event {
- if o := AuditContextFrom(ctx); o != nil {
- return o.Event
+ if ac := AuditContextFrom(ctx); ac.Enabled() {
+ return &ac.Event
}
return nil
}
@@ -187,20 +153,16 @@ func WithAuditID(ctx context.Context, auditID types.UID) {
if auditID == "" {
return
}
- ac := AuditContextFrom(ctx)
- if ac == nil {
- return
- }
- ac.auditID = auditID
- if ac.Event != nil {
+ if ac := AuditContextFrom(ctx); ac != nil {
ac.Event.AuditID = auditID
}
}
-// AuditIDFrom returns the value of the audit ID from the request context.
+// AuditIDFrom returns the value of the audit ID from the request context, along with whether
+// auditing is enabled.
func AuditIDFrom(ctx context.Context) (types.UID, bool) {
if ac := AuditContextFrom(ctx); ac != nil {
- return ac.auditID, ac.auditID != ""
+ return ac.Event.AuditID, true
}
return "", false
}
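
A small usage sketch of the reworked AuditContext: with the Event now embedded by value, Enabled() gates annotation writes, and an unset level still allows early annotations to be captured. The wrapper function is illustrative; setting Event.Level mirrors what the cached token authenticator later in this patch does.

package example

import (
	"context"

	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
)

func recordExample(parent context.Context) map[string]string {
	// Attach an audit context; before a policy level is set, Enabled() still
	// reports true so early annotations are not dropped.
	ctx := audit.WithAuditContext(parent)

	ac := audit.AuditContextFrom(ctx)
	ac.Event.Level = auditinternal.LevelMetadata // normally set by policy evaluation

	audit.AddAuditAnnotation(ctx, "example.k8s.io/key", "value")

	if ev := audit.AuditEventFrom(ctx); ev != nil {
		return ev.Annotations
	}
	return nil
}
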
diff --git a/vendor/k8s.io/apiserver/pkg/audit/request.go b/vendor/k8s.io/apiserver/pkg/audit/request.go
index 972669536e..9185278f06 100644
--- a/vendor/k8s.io/apiserver/pkg/audit/request.go
+++ b/vendor/k8s.io/apiserver/pkg/audit/request.go
@@ -28,14 +28,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
auditinternal "k8s.io/apiserver/pkg/apis/audit"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/klog/v2"
-
- "github.com/google/uuid"
)
const (
@@ -43,20 +40,18 @@ const (
userAgentTruncateSuffix = "...TRUNCATED"
)
-func NewEventFromRequest(req *http.Request, requestReceivedTimestamp time.Time, level auditinternal.Level, attribs authorizer.Attributes) (*auditinternal.Event, error) {
- ev := &auditinternal.Event{
- RequestReceivedTimestamp: metav1.NewMicroTime(requestReceivedTimestamp),
- Verb: attribs.GetVerb(),
- RequestURI: req.URL.RequestURI(),
- UserAgent: maybeTruncateUserAgent(req),
- Level: level,
+func LogRequestMetadata(ctx context.Context, req *http.Request, requestReceivedTimestamp time.Time, level auditinternal.Level, attribs authorizer.Attributes) {
+ ac := AuditContextFrom(ctx)
+ if !ac.Enabled() {
+ return
}
+ ev := &ac.Event
- auditID, found := AuditIDFrom(req.Context())
- if !found {
- auditID = types.UID(uuid.New().String())
- }
- ev.AuditID = auditID
+ ev.RequestReceivedTimestamp = metav1.NewMicroTime(requestReceivedTimestamp)
+ ev.Verb = attribs.GetVerb()
+ ev.RequestURI = req.URL.RequestURI()
+ ev.UserAgent = maybeTruncateUserAgent(req)
+ ev.Level = level
ips := utilnet.SourceIPs(req)
ev.SourceIPs = make([]string, len(ips))
@@ -84,10 +79,6 @@ func NewEventFromRequest(req *http.Request, requestReceivedTimestamp time.Time,
APIVersion: attribs.GetAPIVersion(),
}
}
-
- addAuditAnnotationsFrom(req.Context(), ev)
-
- return ev, nil
}
// LogImpersonatedUser fills in the impersonated user attributes into an audit event.
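
A hedged sketch of calling the new LogRequestMetadata helper from a request filter, assuming an audit context has already been attached to the request's context via WithAuditContext; the wrapper function and its arguments are illustrative.

package example

import (
	"net/http"
	"time"

	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
	"k8s.io/apiserver/pkg/authorization/authorizer"
)

// logMetadata writes the request metadata directly onto the event embedded in
// the AuditContext, replacing the former NewEventFromRequest which allocated
// and returned a fresh event.
func logMetadata(req *http.Request, level auditinternal.Level, attribs authorizer.Attributes) {
	audit.LogRequestMetadata(req.Context(), req, time.Now(), level, attribs)
}
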
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go
index 11afa84cbd..ee8c89f5ce 100644
--- a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go
+++ b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go
@@ -24,8 +24,8 @@ import (
"strings"
"unicode/utf8"
+ "k8s.io/apimachinery/pkg/util/httpstream/wsstream"
"k8s.io/apiserver/pkg/authentication/authenticator"
- "k8s.io/apiserver/pkg/util/wsstream"
)
const bearerProtocolPrefix = "base64url.bearer.authorization.k8s.io."
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go
index ec0b14768d..18167dddc2 100644
--- a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go
+++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go
@@ -197,15 +197,14 @@ func (a *cachedTokenAuthenticator) doAuthenticateToken(ctx context.Context, toke
recorder := &recorder{}
ctx = warning.WithWarningRecorder(ctx, recorder)
- // since this is shared work between multiple requests, we have no way of knowing if any
- // particular request supports audit annotations. thus we always attempt to record them.
- ev := &auditinternal.Event{Level: auditinternal.LevelMetadata}
ctx = audit.WithAuditContext(ctx)
ac := audit.AuditContextFrom(ctx)
- ac.Event = ev
+ // since this is shared work between multiple requests, we have no way of knowing if any
+ // particular request supports audit annotations. thus we always attempt to record them.
+ ac.Event.Level = auditinternal.LevelMetadata
record.resp, record.ok, record.err = a.authenticator.AuthenticateToken(ctx, token)
- record.annotations = ev.Annotations
+ record.annotations = ac.Event.Annotations
record.warnings = recorder.extractWarnings()
if !a.cacheErrs && record.err != nil {
diff --git a/vendor/k8s.io/apiserver/pkg/cel/composited.go b/vendor/k8s.io/apiserver/pkg/cel/composited.go
deleted file mode 100644
index 9e5e634d0c..0000000000
--- a/vendor/k8s.io/apiserver/pkg/cel/composited.go
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
-Copyright 2023 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cel
-
-import (
- "github.com/google/cel-go/common/types/ref"
- exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
-)
-
-var _ ref.TypeProvider = (*CompositedTypeProvider)(nil)
-var _ ref.TypeAdapter = (*CompositedTypeAdapter)(nil)
-
-// CompositedTypeProvider is the provider that tries each of the underlying
-// providers in order, and returns result of the first successful attempt.
-type CompositedTypeProvider struct {
- // Providers contains the underlying type providers.
- // If Providers is empty, the CompositedTypeProvider becomes no-op provider.
- Providers []ref.TypeProvider
-}
-
-// EnumValue finds out the numeric value of the given enum name.
-// The result comes from first provider that returns non-nil.
-func (c *CompositedTypeProvider) EnumValue(enumName string) ref.Val {
- for _, p := range c.Providers {
- val := p.EnumValue(enumName)
- if val != nil {
- return val
- }
- }
- return nil
-}
-
-// FindIdent takes a qualified identifier name and returns a Value if one
-// exists. The result comes from first provider that returns non-nil.
-func (c *CompositedTypeProvider) FindIdent(identName string) (ref.Val, bool) {
- for _, p := range c.Providers {
- val, ok := p.FindIdent(identName)
- if ok {
- return val, ok
- }
- }
- return nil, false
-}
-
-// FindType finds the Type given a qualified type name, or return false
-// if none of the providers finds the type.
-// If any of the providers find the type, the first provider that returns true
-// will be the result.
-func (c *CompositedTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
- for _, p := range c.Providers {
- typ, ok := p.FindType(typeName)
- if ok {
- return typ, ok
- }
- }
- return nil, false
-}
-
-// FindFieldType returns the field type for a checked type value. Returns
-// false if none of the providers can find the type.
-// If multiple providers can find the field, the result is taken from
-// the first that does.
-func (c *CompositedTypeProvider) FindFieldType(messageType string, fieldName string) (*ref.FieldType, bool) {
- for _, p := range c.Providers {
- ft, ok := p.FindFieldType(messageType, fieldName)
- if ok {
- return ft, ok
- }
- }
- return nil, false
-}
-
-// NewValue creates a new type value from a qualified name and map of field
-// name to value.
-// If multiple providers can create the new type, the first that returns
-// non-nil will decide the result.
-func (c *CompositedTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val {
- for _, p := range c.Providers {
- v := p.NewValue(typeName, fields)
- if v != nil {
- return v
- }
- }
- return nil
-}
-
-// CompositedTypeAdapter is the adapter that tries each of the underlying
-// type adapter in order until the first successfully conversion.
-type CompositedTypeAdapter struct {
- // Adapters contains underlying type adapters.
- // If Adapters is empty, the CompositedTypeAdapter becomes a no-op adapter.
- Adapters []ref.TypeAdapter
-}
-
-// NativeToValue takes the value and convert it into a ref.Val
-// The result comes from the first TypeAdapter that returns non-nil.
-func (c *CompositedTypeAdapter) NativeToValue(value interface{}) ref.Val {
- for _, a := range c.Adapters {
- v := a.NativeToValue(value)
- if v != nil {
- return v
- }
- }
- return nil
-}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go
new file mode 100644
index 0000000000..ed0d340411
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package environment
+
+import (
+ "fmt"
+ "strconv"
+ "sync"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/ext"
+ "golang.org/x/sync/singleflight"
+
+ "k8s.io/apimachinery/pkg/util/version"
+ celconfig "k8s.io/apiserver/pkg/apis/cel"
+ "k8s.io/apiserver/pkg/cel/library"
+)
+
+// DefaultCompatibilityVersion returns a default compatibility version for use with EnvSet
+// that guarantees compatibility with CEL features/libraries/parameters understood by
+// an n-1 version
+//
+// This default will be set to no more than n-1 of the current Kubernetes major.minor version.
+//
+// Note that a default version number less than n-1 indicates a wider range of version
+// compatibility than strictly required for rollback. A wide range of compatibility is
+// desirable because it means that CEL expressions are portable across a wider range
+// of Kubernetes versions.
+func DefaultCompatibilityVersion() *version.Version {
+ return version.MajorMinor(1, 27)
+}
+
+var baseOpts = []VersionedOptions{
+ {
+ // CEL epoch was actually 1.23, but we artificially set it to 1.0 because these
+ // options should always be present.
+ IntroducedVersion: version.MajorMinor(1, 0),
+ EnvOptions: []cel.EnvOption{
+ cel.HomogeneousAggregateLiterals(),
+ // Validate function declarations once during base env initialization,
+ // so they don't need to be evaluated each time a CEL rule is compiled.
+ // This is a relatively expensive operation.
+ cel.EagerlyValidateDeclarations(true),
+ cel.DefaultUTCTimeZone(true),
+
+ ext.Strings(ext.StringsVersion(0)),
+ library.URLs(),
+ library.Regex(),
+ library.Lists(),
+ },
+ ProgramOptions: []cel.ProgramOption{
+ cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost),
+ cel.CostLimit(celconfig.PerCallLimit),
+ },
+ },
+ {
+ IntroducedVersion: version.MajorMinor(1, 27),
+ EnvOptions: []cel.EnvOption{
+ library.Authz(),
+ },
+ },
+ {
+ IntroducedVersion: version.MajorMinor(1, 28),
+ EnvOptions: []cel.EnvOption{
+ cel.CrossTypeNumericComparisons(true),
+ cel.OptionalTypes(),
+ library.Quantity(),
+ },
+ },
+ // TODO: switch to ext.Strings version 2 once format() is fixed to work with HomogeneousAggregateLiterals.
+}
+
+// MustBaseEnvSet returns the common CEL base environments for Kubernetes for Version, or panics
+// if the version is nil, or does not have major and minor components.
+//
+// The returned environment contains function libraries, language settings, optimizations and
+// runtime cost limits appropriate for CEL as it is used in Kubernetes.
+//
+// The returned environment contains no CEL variable definitions or custom type declarations and
+// should be extended to construct environments with the appropriate variable definitions,
+// type declarations and any other needed configuration.
+func MustBaseEnvSet(ver *version.Version) *EnvSet {
+ if ver == nil {
+ panic("version must be non-nil")
+ }
+ if len(ver.Components()) < 2 {
+ panic(fmt.Sprintf("version must contain a major and minor component, but got: %s", ver.String()))
+ }
+ key := strconv.FormatUint(uint64(ver.Major()), 10) + "." + strconv.FormatUint(uint64(ver.Minor()), 10)
+ if entry, ok := baseEnvs.Load(key); ok {
+ return entry.(*EnvSet)
+ }
+
+ entry, _, _ := baseEnvsSingleflight.Do(key, func() (interface{}, error) {
+ entry := mustNewEnvSet(ver, baseOpts)
+ baseEnvs.Store(key, entry)
+ return entry, nil
+ })
+ return entry.(*EnvSet)
+}
+
+var (
+ baseEnvs = sync.Map{}
+ baseEnvsSingleflight = &singleflight.Group{}
+)
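
A minimal sketch of obtaining the shared base CEL environment set, mirroring the environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion()) call added to NewWebhook earlier in this patch; the surrounding function and the pinned 1.28 version are illustrative.

package example

import (
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apiserver/pkg/cel/environment"
)

func baseEnvSets() (*environment.EnvSet, *environment.EnvSet) {
	// Cached per major.minor key, so repeated calls are cheap.
	compat := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())

	// An explicit version can also be used, e.g. in tests.
	pinned := environment.MustBaseEnvSet(version.MajorMinor(1, 28))

	return compat, pinned
}
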
diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go b/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
new file mode 100644
index 0000000000..b47bc8e984
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
@@ -0,0 +1,274 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package environment
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/google/cel-go/cel"
+
+ "k8s.io/apimachinery/pkg/util/version"
+ apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// Type defines the different types of CEL environments used in Kubernetes.
+// CEL environments are used to compile and evaluate CEL expressions.
+// Environments include:
+// - Function libraries
+// - Variables
+// - Types (both core CEL types and Kubernetes types)
+// - Other CEL environment and program options
+type Type string
+
+const (
+ // NewExpressions is used to validate new or modified expressions in
+ // requests that write expressions to API resources.
+ //
+ // This environment type is compatible with a specific Kubernetes
+ // major/minor version. To ensure safe rollback, this environment type
+ // may not include all the function libraries, variables, type declarations, and CEL
+ // language settings available in the StoredExpressions environment type.
+ //
+ // NewExpressions must be used to validate (parse, compile, type check)
+ // all new or modified CEL expressions before they are written to storage.
+ NewExpressions Type = "NewExpressions"
+
+ // StoredExpressions is used to compile and run CEL expressions that have been
+ // persisted to storage.
+ //
+ // This environment type is compatible with CEL expressions that have been
+ // persisted to storage by all known versions of Kubernetes. This is the most
+ // permissive environment available.
+ //
+ // StoredExpressions is appropriate for use with CEL expressions in
+ // configuration files.
+ StoredExpressions Type = "StoredExpressions"
+)
+
+// EnvSet manages the creation and extension of CEL environments. Each EnvSet contains
+// both a NewExpressions and a StoredExpressions environment. EnvSets are created
+// and extended using VersionedOptions so that the EnvSet can prepare environments according
+// to what options were introduced at which versions.
+//
+// Each EnvSet is given a compatibility version when it is created, and prepares the
+// NewExpressions environment to be compatible with that version. The EnvSet also
+// prepares StoredExpressions to be compatible with all known versions of Kubernetes.
+type EnvSet struct {
+ // compatibilityVersion is the version that all configuration in
+ // the NewExpressions environment is compatible with.
+ compatibilityVersion *version.Version
+
+ // newExpressions is an environment containing only configuration
+ // in this EnvSet that is enabled at this compatibilityVersion.
+ newExpressions *cel.Env
+
+ // storedExpressions is an environment containing the latest configuration
+ // in this EnvSet.
+ storedExpressions *cel.Env
+}
+
+func newEnvSet(compatibilityVersion *version.Version, opts []VersionedOptions) (*EnvSet, error) {
+ base, err := cel.NewEnv()
+ if err != nil {
+ return nil, err
+ }
+ baseSet := EnvSet{compatibilityVersion: compatibilityVersion, newExpressions: base, storedExpressions: base}
+ return baseSet.Extend(opts...)
+}
+
+func mustNewEnvSet(ver *version.Version, opts []VersionedOptions) *EnvSet {
+ envSet, err := newEnvSet(ver, opts)
+ if err != nil {
+ panic(fmt.Sprintf("Default environment misconfigured: %v", err))
+ }
+ return envSet
+}
+
+// NewExpressionsEnv returns the NewExpressions environment Type for this EnvSet.
+// See NewExpressions for details.
+func (e *EnvSet) NewExpressionsEnv() *cel.Env {
+ return e.newExpressions
+}
+
+// StoredExpressionsEnv returns the StoredExpressions environment Type for this EnvSet.
+// See StoredExpressions for details.
+func (e *EnvSet) StoredExpressionsEnv() *cel.Env {
+ return e.storedExpressions
+}
+
+// Env returns the CEL environment for the given Type.
+func (e *EnvSet) Env(envType Type) (*cel.Env, error) {
+ switch envType {
+ case NewExpressions:
+ return e.newExpressions, nil
+ case StoredExpressions:
+ return e.storedExpressions, nil
+ default:
+ return nil, fmt.Errorf("unsupported environment type: %v", envType)
+ }
+}
+
+// VersionedOptions provides a set of CEL configuration options as well as the version the
+// options were introduced and, optionally, the version the options were removed.
+type VersionedOptions struct {
+ // IntroducedVersion is the version at which these options were introduced.
+ // The NewExpressions environment will only include options introduced at or before the
+ // compatibility version of the EnvSet.
+ //
+ // For example, to configure a CEL environment with an "object" variable bound to a
+ // resource kind, first create a DeclType from the groupVersionKind of the resource and then
+ // populate a VersionedOptions with the variable and the type:
+ //
+ // schema := schemaResolver.ResolveSchema(groupVersionKind)
+ // objectType := apiservercel.SchemaDeclType(schema, true)
+ // ...
+ // VersionedOptions{
+ // IntroducedVersion: version.MajorMinor(1, 26),
+ // DeclTypes: []*apiservercel.DeclType{ objectType },
+ // EnvOptions: []cel.EnvOption{ cel.Variable("object", objectType.CelType()) },
+ // },
+ //
+ // To create a DeclType from a CRD, use a structural schema. For example:
+ //
+ // schema := structuralschema.NewStructural(crdJSONProps)
+ // objectType := apiservercel.SchemaDeclType(schema, true)
+ //
+ // Required.
+ IntroducedVersion *version.Version
+ // RemovedVersion is the version at which these options were removed.
+ // The NewExpressions environment will not include options removed at or before the
+ // compatibility version of the EnvSet.
+ //
+ // All option removals must be backward compatible; the removal must either be paired
+ // with a compatible replacement introduced at the same version, or the removal must be non-breaking.
+ // The StoredExpressions environment will not include removed options.
+ //
+ // A function library may be upgraded by setting the RemovedVersion of the old library
+ // to the same value as the IntroducedVersion of the new library. The new library must
+ // be backward compatible with the old library.
+ //
+ // For example:
+ //
+ // VersionedOptions{
+ // IntroducedVersion: version.MajorMinor(1, 26), RemovedVersion: version.MajorMinor(1, 27),
+ // EnvOptions: []cel.EnvOption{ libraries.Example(libraries.ExampleVersion(1)) },
+ // },
+ // VersionedOptions{
+ // IntroducedVersion: version.MajorMinor(1, 27),
+ // EnvOptions: []cel.EnvOption{ libraries.Example(libraries.ExampleVersion(2)) },
+ // },
+ //
+ // Optional.
+ RemovedVersion *version.Version
+
+ // EnvOptions provides CEL EnvOptions. This may be used to add a cel.Variable, a
+ // cel.Library, or to enable other CEL EnvOptions such as language settings.
+ //
+ // If an added cel.Variable has an OpenAPI type, the type must be included in DeclTypes.
+ EnvOptions []cel.EnvOption
+ // ProgramOptions provides CEL ProgramOptions. This may be used to set a cel.CostLimit,
+ // enable optimizations, and set other program level options that should be enabled
+ // for all programs using this environment.
+ ProgramOptions []cel.ProgramOption
+ // DeclTypes provides OpenAPI type declarations to register with the environment.
+ //
+ // If cel.Variables added to EnvOptions refer to an OpenAPI type, the type must be included in
+ // DeclTypes.
+ DeclTypes []*apiservercel.DeclType
+}
+
+// Extend returns an EnvSet based on this EnvSet but extended with given VersionedOptions.
+// This EnvSet is not mutated.
+// The returned EnvSet has the same compatibility version as the EnvSet that was extended.
+//
+// Extend is an expensive operation and each call to Extend that adds DeclTypes increases
+// the depth of a chain of resolvers. For these reasons, calls to Extend should be kept
+// to a minimum.
+//
+// Some best practices:
+//
+// - Minimize calls to Extend when handling API requests. Where possible, call Extend
+// when initializing components.
+// - If an EnvSet returned by Extend can be used to compile multiple CEL programs,
+// call Extend once and reuse the returned EnvSet.
+// - Prefer a single call to Extend with a full list of VersionedOptions over
+// making multiple calls to Extend.
+func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) {
+ if len(options) > 0 {
+ newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, options)
+ if err != nil {
+ return nil, err
+ }
+ p, err := e.newExpressions.Extend(newExprOpts)
+ if err != nil {
+ return nil, err
+ }
+ storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), options)
+ if err != nil {
+ return nil, err
+ }
+ s, err := e.storedExpressions.Extend(storedExprOpt)
+ if err != nil {
+ return nil, err
+ }
+ return &EnvSet{compatibilityVersion: e.compatibilityVersion, newExpressions: p, storedExpressions: s}, nil
+ }
+ return e, nil
+}
+
+func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, opts []VersionedOptions) (cel.EnvOption, error) {
+ var envOpts []cel.EnvOption
+ var progOpts []cel.ProgramOption
+ var declTypes []*apiservercel.DeclType
+
+ for _, opt := range opts {
+ if compatVer.AtLeast(opt.IntroducedVersion) && (opt.RemovedVersion == nil || compatVer.LessThan(opt.RemovedVersion)) {
+ envOpts = append(envOpts, opt.EnvOptions...)
+ progOpts = append(progOpts, opt.ProgramOptions...)
+ declTypes = append(declTypes, opt.DeclTypes...)
+ }
+ }
+
+ if len(declTypes) > 0 {
+ provider := apiservercel.NewDeclTypeProvider(declTypes...)
+ providerOpts, err := provider.EnvOptions(base.TypeProvider())
+ if err != nil {
+ return nil, err
+ }
+ envOpts = append(envOpts, providerOpts...)
+ }
+
+ combined := cel.Lib(&envLoader{
+ envOpts: envOpts,
+ progOpts: progOpts,
+ })
+ return combined, nil
+}
+
+type envLoader struct {
+ envOpts []cel.EnvOption
+ progOpts []cel.ProgramOption
+}
+
+func (e *envLoader) CompileOptions() []cel.EnvOption {
+ return e.envOpts
+}
+
+func (e *envLoader) ProgramOptions() []cel.ProgramOption {
+ return e.progOpts
+}
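
A hedged sketch of extending the base EnvSet with a versioned variable declaration and choosing which environment type to compile against; the "object" variable, the 1.28 version, and the use of cel.DynType are illustrative assumptions, not part of this patch.

package example

import (
	"github.com/google/cel-go/cel"

	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apiserver/pkg/cel/environment"
)

func compileExample(expr string) (*cel.Ast, error) {
	base := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())

	extended, err := base.Extend(environment.VersionedOptions{
		IntroducedVersion: version.MajorMinor(1, 28),
		EnvOptions:        []cel.EnvOption{cel.Variable("object", cel.DynType)},
	})
	if err != nil {
		return nil, err
	}

	// NewExpressions is the stricter environment for expressions being written
	// now; StoredExpressions would be used for already persisted expressions.
	env, err := extended.Env(environment.NewExpressions)
	if err != nil {
		return nil, err
	}

	ast, issues := env.Compile(expr)
	if issues != nil && issues.Err() != nil {
		return nil, issues.Err()
	}
	return ast, nil
}
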
diff --git a/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go b/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go
new file mode 100644
index 0000000000..1742deb0a2
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go
@@ -0,0 +1,191 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lazy
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ "k8s.io/apiserver/pkg/cel"
+)
+
+type GetFieldFunc func(*MapValue) ref.Val
+
+var _ ref.Val = (*MapValue)(nil)
+var _ traits.Mapper = (*MapValue)(nil)
+
+// MapValue is a map that lazily evaluates its values when a field is first accessed.
+// The map value is not designed to be thread-safe.
+type MapValue struct {
+ typeValue *types.TypeValue
+
+ // values are previously evaluated values obtained from callbacks
+ values map[string]ref.Val
+ // callbacks are a map of field name to the function that returns the field Val
+ callbacks map[string]GetFieldFunc
+ // knownValues are registered names, used for iteration
+ knownValues []string
+}
+
+func NewMapValue(objectType ref.Type) *MapValue {
+ return &MapValue{
+ typeValue: types.NewTypeValue(objectType.TypeName(), traits.IndexerType|traits.FieldTesterType|traits.IterableType),
+ values: map[string]ref.Val{},
+ callbacks: map[string]GetFieldFunc{},
+ }
+}
+
+// Append adds the given field with its name and callback.
+func (m *MapValue) Append(name string, callback GetFieldFunc) {
+ m.knownValues = append(m.knownValues, name)
+ m.callbacks[name] = callback
+}
+
+// Contains checks if the key is known to the map
+func (m *MapValue) Contains(key ref.Val) ref.Val {
+ v, found := m.Find(key)
+ if v != nil && types.IsUnknownOrError(v) {
+ return v
+ }
+ return types.Bool(found)
+}
+
+// Iterator returns an iterator to traverse the map.
+func (m *MapValue) Iterator() traits.Iterator {
+ return &iterator{parent: m, index: 0}
+}
+
+// Size returns the number of currently known fields
+func (m *MapValue) Size() ref.Val {
+ return types.Int(len(m.callbacks))
+}
+
+// ConvertToNative returns an error because it is disallowed
+func (m *MapValue) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("disallowed conversion from %q to %q", m.typeValue.TypeName(), typeDesc.Name())
+}
+
+// ConvertToType converts the map to the given type.
+// Only its own type and "Type" type are allowed.
+func (m *MapValue) ConvertToType(typeVal ref.Type) ref.Val {
+ switch typeVal {
+ case m.typeValue:
+ return m
+ case types.TypeType:
+ return m.typeValue
+ }
+ return types.NewErr("disallowed conversion from %q to %q", m.typeValue.TypeName(), typeVal.TypeName())
+}
+
+// Equal returns true if the other object is the same pointer-wise.
+func (m *MapValue) Equal(other ref.Val) ref.Val {
+ otherMap, ok := other.(*MapValue)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(other)
+ }
+ return types.Bool(m == otherMap)
+}
+
+// Type returns its registered type.
+func (m *MapValue) Type() ref.Type {
+ return m.typeValue
+}
+
+// Value is not allowed.
+func (m *MapValue) Value() any {
+ return types.NoSuchOverloadErr()
+}
+
+// resolveField resolves the field. Calls the callback if the value is not yet stored.
+func (m *MapValue) resolveField(name string) ref.Val {
+ v, seen := m.values[name]
+ if seen {
+ return v
+ }
+ f := m.callbacks[name]
+ v = f(m)
+ m.values[name] = v
+ return v
+}
+
+func (m *MapValue) Find(key ref.Val) (ref.Val, bool) {
+ n, ok := key.(types.String)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(n), true
+ }
+ name, ok := cel.Unescape(n.Value().(string))
+ if !ok {
+ return nil, false
+ }
+ if _, exists := m.callbacks[name]; !exists {
+ return nil, false
+ }
+ return m.resolveField(name), true
+}
+
+func (m *MapValue) Get(key ref.Val) ref.Val {
+ v, found := m.Find(key)
+ if found {
+ return v
+ }
+ return types.ValOrErr(key, "no such key: %v", key)
+}
+
+type iterator struct {
+ parent *MapValue
+ index int
+}
+
+func (i *iterator) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, fmt.Errorf("disallowed conversion to %q", typeDesc.Name())
+}
+
+func (i *iterator) ConvertToType(typeValue ref.Type) ref.Val {
+ return types.NewErr("disallowed conversion to %q", typeValue.TypeName())
+}
+
+func (i *iterator) Equal(other ref.Val) ref.Val {
+ otherIterator, ok := other.(*iterator)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(other)
+ }
+ return types.Bool(otherIterator == i)
+}
+
+func (i *iterator) Type() ref.Type {
+ return types.IteratorType
+}
+
+func (i *iterator) Value() any {
+ return nil
+}
+
+func (i *iterator) HasNext() ref.Val {
+ return types.Bool(i.index < len(i.parent.knownValues))
+}
+
+func (i *iterator) Next() ref.Val {
+ ret := i.parent.Get(types.String(i.parent.knownValues[i.index]))
+ i.index++
+ return ret
+}
+
+var _ traits.Iterator = (*iterator)(nil)
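
A brief sketch of the new lazy.MapValue: fields are registered with callbacks and only evaluated on first access. The type name, field name, and returned value are illustrative.

package example

import (
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"

	"k8s.io/apiserver/pkg/cel/lazy"
)

func lazyVariables() ref.Val {
	m := lazy.NewMapValue(types.NewTypeValue("exampleVariables"))

	// The callback runs only the first time "expensive" is read; the result is
	// then cached on the MapValue.
	m.Append("expensive", func(_ *lazy.MapValue) ref.Val {
		return types.String("computed once")
	})

	return m.Get(types.String("expensive"))
}
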
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go
index 606e5769ad..00f0200e86 100644
--- a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go
@@ -174,6 +174,26 @@ import (
// Examples:
//
// authorizer.path('/healthz').check('GET').reason()
+//
+// errored
+//
+// Returns true if the authorization check resulted in an error.
+//
+// .errored()
+//
+// Examples:
+//
+// authorizer.group('').resource('pods').namespace('default').check('create').errored() // Returns true if the authorization check resulted in an error
+//
+// error
+//
+// If the authorization check resulted in an error, returns the error. Otherwise, returns the empty string.
+//
+// .error()
+//
+// Examples:
+//
+// authorizer.group('').resource('pods').namespace('default').check('create').error()
func Authz() cel.EnvOption {
return cel.Lib(authzLib)
}
@@ -209,6 +229,12 @@ var authzLibraryDecls = map[string][]cel.FunctionOpt{
cel.BinaryBinding(pathCheckCheck)),
cel.MemberOverload("resourcecheck_check", []*cel.Type{ResourceCheckType, cel.StringType}, DecisionType,
cel.BinaryBinding(resourceCheckCheck))},
+ "errored": {
+ cel.MemberOverload("decision_errored", []*cel.Type{DecisionType}, cel.BoolType,
+ cel.UnaryBinding(decisionErrored))},
+ "error": {
+ cel.MemberOverload("decision_error", []*cel.Type{DecisionType}, cel.StringType,
+ cel.UnaryBinding(decisionError))},
"allowed": {
cel.MemberOverload("decision_allowed", []*cel.Type{DecisionType}, cel.BoolType,
cel.UnaryBinding(decisionAllowed))},
@@ -384,6 +410,27 @@ func resourceCheckCheck(arg1, arg2 ref.Val) ref.Val {
return resourceCheck.Authorize(context.TODO(), apiVerb)
}
+func decisionErrored(arg ref.Val) ref.Val {
+ decision, ok := arg.(decisionVal)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(arg)
+ }
+
+ return types.Bool(decision.err != nil)
+}
+
+func decisionError(arg ref.Val) ref.Val {
+ decision, ok := arg.(decisionVal)
+ if !ok {
+ return types.MaybeNoSuchOverloadErr(arg)
+ }
+
+ if decision.err == nil {
+ return types.String("")
+ }
+ return types.String(decision.err.Error())
+}
+
func decisionAllowed(arg ref.Val) ref.Val {
decision, ok := arg.(decisionVal)
if !ok {
@@ -478,10 +525,7 @@ func (a pathCheckVal) Authorize(ctx context.Context, verb string) ref.Val {
}
decision, reason, err := a.authorizer.authAuthorizer.Authorize(ctx, attr)
- if err != nil {
- return types.NewErr("error in authorization check: %v", err)
- }
- return newDecision(decision, reason)
+ return newDecision(decision, err, reason)
}
type groupCheckVal struct {
@@ -516,18 +560,16 @@ func (a resourceCheckVal) Authorize(ctx context.Context, verb string) ref.Val {
User: a.groupCheck.authorizer.userInfo,
}
decision, reason, err := a.groupCheck.authorizer.authAuthorizer.Authorize(ctx, attr)
- if err != nil {
- return types.NewErr("error in authorization check: %v", err)
- }
- return newDecision(decision, reason)
+ return newDecision(decision, err, reason)
}
-func newDecision(authDecision authorizer.Decision, reason string) decisionVal {
- return decisionVal{receiverOnlyObjectVal: receiverOnlyVal(DecisionType), authDecision: authDecision, reason: reason}
+func newDecision(authDecision authorizer.Decision, err error, reason string) decisionVal {
+ return decisionVal{receiverOnlyObjectVal: receiverOnlyVal(DecisionType), authDecision: authDecision, err: err, reason: reason}
}
type decisionVal struct {
receiverOnlyObjectVal
+ err error
authDecision authorizer.Decision
reason string
}
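
As a hedged illustration of how the new accessors compose with the existing builder functions, a match condition could check for an authorizer error explicitly before falling back to the decision itself (the resource and verb here are illustrative):

	authorizer.group('').resource('pods').namespace('default').check('create').errored() ||
	authorizer.group('').resource('pods').namespace('default').check('create').allowed()
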
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go
index fe526398a5..5201d187be 100644
--- a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go
@@ -41,7 +41,7 @@ func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, re
// This cost is set to allow for only two authorization checks per expression
cost := uint64(350000)
return &cost
- case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "denied", "reason":
+ case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored":
// All authorization builder and accessor functions have a nominal cost
cost := uint64(1)
return &cost
@@ -91,7 +91,7 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch
// An authorization check has a fixed cost
// This cost is set to allow for only two authorization checks per expression
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 350000, Max: 350000}}
- case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "denied", "reason":
+ case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored":
// All authorization builder and accessor functions have a nominal cost
return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf":
@@ -126,51 +126,13 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch
sz := l.sizeEstimate(*target)
toReplaceSz := l.sizeEstimate(args[0])
replaceWithSz := l.sizeEstimate(args[1])
-
- var replaceCount, retainedSz checker.SizeEstimate
- // find the longest replacement:
- if toReplaceSz.Min == 0 {
- // if the string being replaced is empty, replace surrounds all characters in the input string with the replacement.
- if sz.Max < math.MaxUint64 {
- replaceCount.Max = sz.Max + 1
- } else {
- replaceCount.Max = sz.Max
- }
- // Include the length of the longest possible original string length.
- retainedSz.Max = sz.Max
- } else if replaceWithSz.Max <= toReplaceSz.Min {
- // If the replacement does not make the result longer, use the original string length.
- replaceCount.Max = 0
- retainedSz.Max = sz.Max
- } else {
- // Replace the smallest possible substrings with the largest possible replacement
- // as many times as possible.
- replaceCount.Max = uint64(math.Ceil(float64(sz.Max) / float64(toReplaceSz.Min)))
- }
-
- // find the shortest replacement:
- if toReplaceSz.Max == 0 {
- // if the string being replaced is empty, replace surrounds all characters in the input string with the replacement.
- if sz.Min < math.MaxUint64 {
- replaceCount.Min = sz.Min + 1
- } else {
- replaceCount.Min = sz.Min
- }
- // Include the length of the shortest possible original string length.
- retainedSz.Min = sz.Min
- } else if toReplaceSz.Max <= replaceWithSz.Min {
- // If the replacement does not make the result shorter, use the original string length.
- replaceCount.Min = 0
- retainedSz.Min = sz.Min
- } else {
- // Replace the largest possible substrings being with the smallest possible replacement
- // as many times as possible.
- replaceCount.Min = uint64(math.Ceil(float64(sz.Min) / float64(toReplaceSz.Max)))
- }
- size := replaceCount.Multiply(replaceWithSz).Add(retainedSz)
+ // smallest possible result: smallest input size composed of the largest possible substrings being replaced by smallest possible replacement
+ minSz := uint64(math.Ceil(float64(sz.Min)/float64(toReplaceSz.Max))) * replaceWithSz.Min
+ // largest possible result: largest input size composed of the smallest possible substrings being replaced by largest possible replacement
+ maxSz := uint64(math.Ceil(float64(sz.Max)/float64(toReplaceSz.Min))) * replaceWithSz.Max
// cost is the traversal plus the construction of the result
- return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor), ResultSize: &size}
+ return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor), ResultSize: &checker.SizeEstimate{Min: minSz, Max: maxSz}}
}
case "split":
if target != nil {
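
To illustrate the simplified size estimate above with illustrative numbers: for a target string of size [4, 10], a search string of size [1, 2], and a replacement of size [3, 3], the estimated result size is Min = ceil(4/2)*3 = 6 (the fewest replacements of the largest search string, each by the smallest replacement) and Max = ceil(10/1)*3 = 30 (the most replacements of the smallest search string, each by the largest replacement).
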
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go b/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
deleted file mode 100644
index e2e8fc29bd..0000000000
--- a/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package library
-
-import (
- "github.com/google/cel-go/cel"
- "github.com/google/cel-go/ext"
- "github.com/google/cel-go/interpreter"
-)
-
-// ExtensionLibs declares the set of CEL extension libraries available everywhere CEL is used in Kubernetes.
-var ExtensionLibs = append(k8sExtensionLibs, ext.Strings())
-
-var k8sExtensionLibs = []cel.EnvOption{
- URLs(),
- Regex(),
- Lists(),
- Authz(),
-}
-
-var ExtensionLibRegexOptimizations = []*interpreter.RegexOptimization{FindRegexOptimization, FindAllRegexOptimization}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go
new file mode 100644
index 0000000000..49e3dae7cd
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go
@@ -0,0 +1,375 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+ "errors"
+
+ "github.com/google/cel-go/cel"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+ "k8s.io/apimachinery/pkg/api/resource"
+ apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// Quantity provides a CEL function library extension of Kubernetes
+// resource.Quantity parsing functions. See `resource.Quantity`
+// documentation for more detailed information about the format itself:
+// https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity
+//
+// quantity
+//
+// Converts a string to a Quantity or results in an error if the string is not a valid Quantity. Refer
+// to resource.Quantity documentation for information on accepted patterns.
+//
+// quantity()
+//
+// Examples:
+//
+// quantity('1.5G') // returns a Quantity
+// quantity('200k') // returns a Quantity
+// quantity('200K') // error
+// quantity('Three') // error
+// quantity('Mi') // error
+//
+// isQuantity
+//
+// Returns true if a string is a valid Quantity. isQuantity returns true if and
+// only if quantity does not result in an error.
+//
+// isQuantity( )
+//
+// Examples:
+//
+// isQuantity('1.3G') // returns true
+// isQuantity('1.3Gi') // returns true
+// isQuantity('1,3G') // returns false
+// isQuantity('10000k') // returns true
+// isQuantity('200K') // returns false
+// isQuantity('Three') // returns false
+// isQuantity('Mi') // returns false
+//
+// Conversion to Scalars:
+//
+// - isInteger: returns true if and only if asInteger is safe to call without an error
+//
+// - asInteger: returns a representation of the current value as an int64 if
+// possible or results in an error if conversion would result in overflow
+// or loss of precision.
+//
+// - asApproximateFloat: returns a float64 representation of the quantity which may
+// lose precision. If the value of the quantity is outside the range of a float64,
+// +Inf/-Inf will be returned.
+//
+// .isInteger()
+// .asInteger()
+// .asApproximateFloat()
+//
+// Examples:
+//
+// quantity("50000000G").isInteger() // returns true
+// quantity("50k").isInteger() // returns true
+// quantity("9999999999999999999999999999999999999G").asInteger() // error: cannot convert value to integer
+// quantity("9999999999999999999999999999999999999G").isInteger() // returns false
+// quantity("50k").asInteger() == 50000 // returns true
+// quantity("50k").sub(20000).asApproximateFloat() == 30000 // returns true
+//
+// Arithmetic
+//
+// - sign: Returns `1` if the quantity is positive, `-1` if it is negative, and `0` if it is zero
+//
+// - add: Returns sum of two quantities or a quantity and an integer
+//
+// - sub: Returns difference between two quantities or a quantity and an integer
+//
+// .sign()
+// .add()
+// .add()
+// .sub()
+// .sub()
+//
+// Examples:
+//
+// quantity("50k").add("20k") == quantity("70k") // returns true
+// quantity("50k").add(20) == quantity("50020") // returns true
+// quantity("50k").sub("20k") == quantity("30k") // returns true
+// quantity("50k").sub(20000) == quantity("30k") // returns true
+// quantity("50k").add(20).sub(quantity("100k")).sub(-50000) == quantity("20") // returns true
+//
+// Comparisons
+//
+// - isGreaterThan: Returns true if and only if the receiver is greater than the operand
+//
+// - isLessThan: Returns true if and only if the receiver is less than the operand
+//
+// - compareTo: Compares receiver to operand and returns 0 if they are equal, 1 if the receiver is greater, or -1 if the receiver is less than the operand
+//
+//
+// .isLessThan()
+// .isGreaterThan(